fffiloni committed on
Commit
d0db4cc
•
1 Parent(s): 8e6038a

Update app.py

Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -8,14 +8,14 @@ token = os.environ.get('HF_TOKEN')
 caption = gr.Blocks.load(name="spaces/SRDdev/Image-Caption")
 audio_gen = gr.Blocks.load(name="spaces/fffiloni/audioldm-text-to-audio-generation-clone", api_key=token)
 
-def infer(image_input, manual_caption):
+def infer(image_input, manual_caption, duration):
     if manual_caption == "":
         cap = caption(image_input, fn_index=0)
         print("gpt2 caption: " + cap)
     else:
         cap = manual_caption
         print("manual caption: " + cap)
-    sound = audio_gen(cap, 10, 2.5, 45, 3, fn_index=0)
+    sound = audio_gen(cap, duration, 2.5, 45, 3, fn_index=0)
 
     return cap, sound[1], gr.Group.update(visible=True)
 
@@ -76,6 +76,7 @@ with gr.Blocks(css="style.css") as demo:
 
     input_img = gr.Image(type="filepath", elem_id="input-img")
     manual_cap = gr.Textbox(label="Manual Image description (optional)", lines=2, placeholder="If you're not happy with sound result, you can manually describe the scene depicted in your image :)")
+    duration_in = gr.Slider(minimum=2, maximum=12, value=6, label="Duration")
     caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption")
     sound_output = gr.Audio(label="Result", elem_id="sound-output")
 
@@ -88,7 +89,7 @@ with gr.Blocks(css="style.css") as demo:
 
     gr.HTML(article)
 
-    generate.click(infer, inputs=[input_img, manual_cap], outputs=[caption_output, sound_output, share_group], api_name="i2fx")
+    generate.click(infer, inputs=[input_img, manual_cap, duration_in], outputs=[caption_output, sound_output, share_group], api_name="i2fx")
     share_button.click(None, [], [], _js=share_js)
 
 demo.queue(max_size=32).launch(debug=True)
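
Because the new duration value is wired into the inputs of generate.click, the "i2fx" API endpoint now takes three arguments instead of two. A minimal client-side sketch of the updated call is shown below, assuming a gradio_client setup; the space handle and image path are placeholders, not values from this commit.

# Illustrative only: calling the updated "i2fx" endpoint via gradio_client.
from gradio_client import Client

client = Client("fffiloni/image-to-sound-fx")  # hypothetical space handle, adjust to the actual Space
caption_text, sound_path, _share_update = client.predict(
    "example.jpg",  # image_input: path to a local image (placeholder)
    "",             # manual_caption: empty string lets the loaded caption Space describe the image
    6,              # duration: the new slider value added in this commit (2-12, default 6)
    api_name="/i2fx",
)
print(caption_text, sound_path)

Passing a non-empty manual_caption skips the captioning step, mirroring the branch in infer above; the third return value corresponds to the visibility update for share_group.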