junma committed
Commit 676984e
1 Parent(s): dea8787

Update app.py

Files changed (1)
  1. app.py +21 -5
app.py CHANGED
@@ -53,10 +53,18 @@ def get_meta_from_video(Seg_Tracker, input_video, scale_slider, checkpoint):
     cap.release()
     output_frames = int(total_frames * scale_slider)
     frame_interval = max(1, total_frames // output_frames)
-    ffmpeg.input(input_video, hwaccel='cuda').output(
-        os.path.join(output_dir, '%07d.jpg'), q=2, start_number=0,
-        vf=rf'select=not(mod(n\,{frame_interval}))', vsync='vfr'
-    ).run()
+    print(f"frame_interval: {frame_interval}")
+    try:
+        ffmpeg.input(input_video, hwaccel='cuda').output(
+            os.path.join(output_dir, '%07d.jpg'), q=2, start_number=0,
+            vf=rf'select=not(mod(n\,{frame_interval}))', fps_mode='vfr'
+        ).run()
+    except:
+        print(f"ffmpeg cuda err")
+        ffmpeg.input(input_video).output(
+            os.path.join(output_dir, '%07d.jpg'), q=2, start_number=0,
+            vf=rf'select=not(mod(n\,{frame_interval}))', fps_mode='vfr'
+        ).run()
 
     first_frame_path = os.path.join(output_dir, '0000000.jpg')
     first_frame = cv2.imread(first_frame_path)
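Review note on the hunk above: it wraps the CUDA-accelerated frame dump in a try/except that retries on the CPU, and swaps the deprecated vsync option for fps_mode. The bare except: also swallows KeyboardInterrupt and hides why CUDA failed. Below is a minimal sketch of the same fallback that catches ffmpeg-python's ffmpeg.Error and logs the captured stderr; the extract_frames helper and its signature are illustrative names, not taken from app.py.

import os
import ffmpeg  # ffmpeg-python

def extract_frames(input_video, output_dir, frame_interval):
    """Dump every frame_interval-th frame to output_dir as %07d.jpg, preferring CUDA decode."""
    pattern = os.path.join(output_dir, '%07d.jpg')
    keep_every_nth = rf'select=not(mod(n\,{frame_interval}))'

    def run(use_cuda):
        stream = ffmpeg.input(input_video, hwaccel='cuda') if use_cuda else ffmpeg.input(input_video)
        # fps_mode='vfr' is the replacement for the deprecated vsync option
        stream.output(pattern, q=2, start_number=0, vf=keep_every_nth, fps_mode='vfr').run(
            capture_stdout=True, capture_stderr=True)

    try:
        run(use_cuda=True)
    except ffmpeg.Error as err:  # narrower than a bare except:, and keeps the real error visible
        print('CUDA decode failed, retrying on CPU:', (err.stderr or b'').decode(errors='ignore')[-300:])
        run(use_cuda=False)

Catching ffmpeg.Error keeps interpreter-level exceptions fatal while still falling back whenever the CUDA pipeline itself errors out.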
@@ -368,6 +376,9 @@ def seg_track_app():
             <a href="https://drive.google.com/drive/folders/1EXzRkxZmrXbahCFA8_ImFRM6wQDEpOSe?usp=sharing">
                 <img src="https://img.shields.io/badge/Video-Tutorial-green?style=plastic" alt="Video Tutorial" style="display:inline-block; margin-right:10px;">
             </a>
+            <a href="https://github.com/bowang-lab/MedSAM/tree/MedSAM2?tab=readme-ov-file#fine-tune-sam2-on-the-abdomen-ct-dataset">
+                <img src="https://img.shields.io/badge/Fine--tune-SAM2-blue" alt="Fine-tune SAM2" style="display:inline-block; margin-right:10px;">
+            </a>
         </div>
         <div style="text-align:left; margin-bottom:20px;">
             This API supports using box (generated by scribble) and point prompts for video segmentation with
@@ -386,6 +397,11 @@ def seg_track_app():
             <li>8. Download the video with segmentation results</li>
         </ol>
         </div>
+        <div style="text-align:left; margin-bottom:20px;">
+            Other useful resources:
+            <a href="https://ai.meta.com/sam2" target="_blank">Official demo</a> from MetaAI,
+            <a href="https://www.youtube.com/watch?v=Dv003fTyO-Y" target="_blank">Video tutorial</a> from Piotr Skalski.
+        </div>
         '''
     )
 
@@ -588,4 +604,4 @@ def seg_track_app():
     app.launch(debug=True, enable_queue=True, share=False)
 
 if __name__ == "__main__":
-    seg_track_app()
+    seg_track_app()
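For reference on the first hunk, frame_interval is the stride consumed by the select filter and is derived from the slider fraction computed a few lines earlier. A quick sketch of that arithmetic with illustrative numbers (the 900-frame count and 0.5 slider value are invented for the example, not taken from the app):

# Illustrative values only; app.py reads total_frames from the input video
total_frames = 900
scale_slider = 0.5                                       # keep roughly half of the frames

output_frames = int(total_frames * scale_slider)         # 450
frame_interval = max(1, total_frames // output_frames)   # 2

# select=not(mod(n\,frame_interval)) keeps frames whose index n is a multiple of the stride
kept = [n for n in range(total_frames) if n % frame_interval == 0]
print(len(kept), kept[:5])                               # 450 [0, 2, 4, 6, 8]

Because the stride is floored, the number of extracted frames can come out slightly above output_frames when total_frames is not an exact multiple of frame_interval.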