3v324v23 committed on
Commit
5dcb8ab
1 Parent(s): 2eabe87

beat image, space separated vals support

Browse files
app.py CHANGED
@@ -1,32 +1,64 @@
1
  import gradio as gr, numpy as np
2
- from gradio.components import Audio, Textbox, Checkbox
3
  import beat_manipulator as bm
4
- def BeatSwap(audiofile, pattern: str, scale:float, shift:float, caching:bool):
5
- scale=float(scale)
6
- shift=float(shift)
7
- song=bm.song(path=audiofile, filename=audiofile.split('.')[-2][:-8]+'.'+audiofile.split('.')[-1], caching=caching)
8
- song.quick_beatswap(output=None, pattern=pattern, scale=scale, shift=shift)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  #song.write_audio(output=bm.outputfilename('',song.filename, suffix=' (beatswap)'))
10
- return (song.samplerate, np.asarray(song.audio).T)
 
11
 
12
  audiofile=Audio(source='upload', type='filepath')
13
- patternbox = Textbox(label="Pattern:", placeholder="1, 3, 2, 4!", value="1, 1:1.5, 3, 3:3.5, 5, 5:5.5, 3, 3:3.5, 7, 8", lines=1)
14
- scalebox = Textbox(value=1, label="Beatmap scale, beatmap's beats per minute will be multiplied by this:", placeholder=1, lines=1)
15
  shiftbox = Textbox(value=0, label="Beatmap shift, in beats (applies before scaling):", placeholder=0, lines=1)
16
- cachebox = Checkbox(value=True, label="""Enable caching beatmaps. If True, a text file with the beatmap will be saved to the server (your PC if you are running locally), so that beatswapping for the second time doesn't have to generate the beatmap again.
17
 
18
  Text file will be named after your file, and will only contain a list of numbers with positions of each beat.""")
 
19
 
20
- gr.Interface (fn=BeatSwap,inputs=[audiofile,patternbox,scalebox,shiftbox, cachebox],outputs=Audio(type='numpy'),theme="default",
21
  title = "Stunlocked's Beat Manipulator"
22
- ,description = "Remix music using AI-powered beat detection and advanced beat swapping. https://github.com/stunlocked1/BeatManipulator. Collab version - https://colab.research.google.com/drive/1gEsZCCh2zMKqLmaGH5BPPLrImhEGVhv3?usp=sharing"
 
 
 
 
23
  ,article="""# <h1><p style='text-align: center'><a href='https://github.com/stunlocked1/BeatManipulator' target='_blank'>Github</a></p></h1>
24
 
25
  # Basic usage
26
 
27
  Upload your audio, enter the beat swapping pattern, change scale and shift if needed, and run the app.
28
 
29
- You can test where each beat is by writing `test` into the `pattern` field, which will put cowbells on each beat. Beatmap can sometimes be shifted, for example 0.5 beats forward, so use scale and shift to adjust it.
30
 
31
  Feel free to use complex patterns and very low scales - most of the computation time is in detecting beats, not swapping them.
32
 
@@ -55,7 +87,7 @@ there are certain commands you can write in pattern instead of the actual patter
55
  - `reverse` - reverses the order of all beats
56
  - `test` - test beat detection by putting cowbells on each beat. The highest pitched cowbell should be on the first beat; next cowbell should be on the snare. If it is not, use scale and shift.
57
 
58
- There are also some interesting patterns there: https://github.com/stunlocked1/BeatManipulator/blob/main/presets.json. Those are meant to be used with properly adjusted shift and scale, where 1st beat is 1st kick, 2nd beat is the snare after it.
59
 
60
  Check my soundcloud https://soundcloud.com/stunlocked
61
  """
 
1
  import gradio as gr, numpy as np
2
+ from gradio.components import Audio, Textbox, Checkbox, Image
3
  import beat_manipulator as bm
4
+ import cv2
5
+ def _safer_eval(string:str) -> float:
6
+ if isinstance(string, str):
7
+ string = eval(''.join([i for i in string if i.isdecimal() or i in '.+-*/']))
8
+ return string
9
+
10
def BeatSwap(audiofile, pattern: str, scale: float, shift: float, caching: bool, variableBPM: bool):
    """Gradio callback: beat-swap the uploaded audio and return the result.

    Args:
        audiofile: path to the uploaded audio file (gradio 'filepath' Audio).
        pattern: beat-swapping pattern string, e.g. "1, 3, 2, 4".
        scale, shift: beatmap scale/shift; arithmetic strings are accepted
            and evaluated via _safer_eval.
        caching: cache the generated beatmap to a text file on the server.
        variableBPM: use the (slightly less accurate) variable-BPM tracker.

    Returns:
        ((samplerate, int16 audio array), beat image) for the gradio outputs.
    """
    print(f'___ PATH = {audiofile} ___')
    scale = _safer_eval(scale)
    shift = _safer_eval(shift)
    if audiofile is None:
        # The original code only printed here and then used `song` unbound,
        # crashing with a NameError; fail loudly with a clear message instead.
        raise ValueError('No audio file was provided.')
    try:
        # Strip the random 8-character suffix gradio appends to the
        # uploaded temp file's stem to recover a readable filename.
        song = bm.song(path=audiofile,
                       filename=audiofile.split('.')[-2][:-8] + '.' + audiofile.split('.')[-1],
                       caching=caching)
    except Exception as e:
        print(e)
        song = bm.song(path=audiofile, caching=caching)
    # BeatTrackingProcessor supports variable BPM; BeatDetectionProcessor
    # assumes constant BPM but is slightly more accurate.
    lib = 'madmom.BeatDetectionProcessor' if variableBPM is False else 'madmom.BeatTrackingProcessor'
    song.beatmap.generate(lib=lib, caching=caching)
    try:
        song.beat_image.generate()
        image = song.beat_image.combined
        # NOTE(review): the original computed min(h, w, 2048) then max(..., 2048),
        # which always yields 2048 — so the image is resized to a fixed 2048x2048.
        size = 2048
        image = np.clip(cv2.resize(image, (size, size), interpolation=cv2.INTER_NEAREST).T / 255, -1, 1)
    except Exception as e:
        # Beat-image generation is best-effort; fall back to a tiny black image.
        print(e)
        image = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    song.quick_beatswap(output=None, pattern=pattern, scale=scale, shift=shift, lib=lib)
    # Convert float audio in [-1, 1] to 16-bit PCM, channels-last for gradio.
    song.audio = (np.clip(np.asarray(song.audio), -1, 1) * 32767).astype(np.int16).T
    print('___ SUCCESS ___')
    return ((song.samplerate, song.audio), image)
38
 
39
  audiofile=Audio(source='upload', type='filepath')
40
+ patternbox = Textbox(label="Pattern, comma separated:", placeholder="1, 3, 2, 4!", value="1, 2r, 4, 5, 3, 6r, 8, 7, 9, 11, 12, 13, 15, 13s2, 14s2, 14d9, 16s4, 16s4, 16s2", lines=1)
41
+ scalebox = Textbox(value=0.5, label="Beatmap scale, beatmap's beats per minute will be multiplied by this:", placeholder=1, lines=1)
42
  shiftbox = Textbox(value=0, label="Beatmap shift, in beats (applies before scaling):", placeholder=0, lines=1)
43
+ cachebox = Checkbox(value=True, label="""Enable caching beatmaps. If enabled, a text file with the beatmap will be saved to the server (your PC if you are running locally), so that beatswapping for the second time doesn't have to generate the beatmap again.
44
 
45
  Text file will be named after your file, and will only contain a list of numbers with positions of each beat.""")
46
+ beatdetectionbox = Checkbox(value=False, label='Enable support for variable BPM, however this makes beat detection slightly less accurate')
47
 
48
+ gr.Interface (fn=BeatSwap,inputs=[audiofile,patternbox,scalebox,shiftbox, cachebox, beatdetectionbox],outputs=[Audio(type='numpy'), Image(type='numpy')],theme="default",
49
  title = "Stunlocked's Beat Manipulator"
50
+ ,description = """Remix music using AI-powered beat detection and advanced beat swapping. Make \"every other beat is missing\" remixes, or completely change beat of the song.
51
+
52
+ Github - https://github.com/stunlocked1/BeatManipulator.
53
+
54
+ Colab version - https://colab.research.google.com/drive/1gEsZCCh2zMKqLmaGH5BPPLrImhEGVhv3?usp=sharing"""
55
  ,article="""# <h1><p style='text-align: center'><a href='https://github.com/stunlocked1/BeatManipulator' target='_blank'>Github</a></p></h1>
56
 
57
  # Basic usage
58
 
59
  Upload your audio, enter the beat swapping pattern, change scale and shift if needed, and run the app.
60
 
61
+ It can be useful to test where each beat is by writing `test` into the `pattern` field, which will put cowbells on each beat. Beatmap can sometimes be shifted, for example 0.5 beats forward, so use scale and shift to adjust it.
62
 
63
  Feel free to use complex patterns and very low scales - most of the computation time is in detecting beats, not swapping them.
64
 
 
87
  - `reverse` - reverses the order of all beats
88
  - `test` - test beat detection by putting cowbells on each beat. The highest pitched cowbell should be on the first beat; next cowbell should be on the snare. If it is not, use scale and shift.
89
 
90
+ There are also some interesting patterns there: https://github.com/stunlocked1/BeatManipulator/blob/main/presets.json. Those are meant to be used with properly adjusted shift and scale, where 1st beat is 1st kick, 2nd beat is the snare after it, etc.
91
 
92
  Check my soundcloud https://soundcloud.com/stunlocked
93
  """
beat_manipulator/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (322 Bytes). View file
 
beat_manipulator/__pycache__/analyze.cpython-310.pyc ADDED
Binary file (1.65 kB). View file
 
beat_manipulator/__pycache__/beatmap.cpython-310.pyc ADDED
Binary file (13.5 kB). View file
 
beat_manipulator/__pycache__/effect.cpython-310.pyc ADDED
Binary file (3.5 kB). View file
 
beat_manipulator/__pycache__/generate.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
beat_manipulator/__pycache__/image.cpython-310.pyc ADDED
Binary file (7.19 kB). View file
 
beat_manipulator/__pycache__/main.cpython-310.pyc ADDED
Binary file (27.4 kB). View file
 
beat_manipulator/__pycache__/mix.cpython-310.pyc ADDED
Binary file (1.28 kB). View file
 
beat_manipulator/__pycache__/wrapper.cpython-310.pyc ADDED
Binary file (6.36 kB). View file
 
beat_manipulator/beatmap.py CHANGED
@@ -226,7 +226,7 @@ class beatmap:
226
  if end is not None: self.beatmap=self.beatmap[self.beatmap<=end].astype(int)
227
 
228
  class hitmap(beatmap):
229
- def generate(self, lib='madmom.madmom.RNNBeatProcessor', caching=True):
230
  if self.log is True: print(f'analyzing hits using {lib}; ')
231
  self.hitlib=lib
232
  """Finds positions of actual instrument/drum hits."""
@@ -242,7 +242,7 @@ class hitmap(beatmap):
242
  cached=True
243
  except OSError: cached=False
244
  if cached is False:
245
- if lib=='madmom.RNNBeatProcessor':
246
  import madmom
247
  proc = madmom.features.beats.RNNBeatProcessor()
248
  self.beatmap = proc(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
 
226
  if end is not None: self.beatmap=self.beatmap[self.beatmap<=end].astype(int)
227
 
228
  class hitmap(beatmap):
229
+ def generate(self, lib='madmom.MultiModelSelectionProcessor', caching=True):
230
  if self.log is True: print(f'analyzing hits using {lib}; ')
231
  self.hitlib=lib
232
  """Finds positions of actual instrument/drum hits."""
 
242
  cached=True
243
  except OSError: cached=False
244
  if cached is False:
245
+ if lib=='madmom.RNNBeatProcessor': #broken
246
  import madmom
247
  proc = madmom.features.beats.RNNBeatProcessor()
248
  self.beatmap = proc(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
beat_manipulator/main.py CHANGED
@@ -232,14 +232,18 @@ class song:
232
  # self.hitmap.generate(audio=self.audio, samplerate=self.samplerate, lib=lib, caching=self.caching, filename=self.filename)
233
 
234
  def generate_osu_beatmap(self, difficulties = [0.2, 0.1, 0.08, 0.06, 0.04, 0.02, 0.01, 0.005]):
235
- self.hitmap.osu(self, difficulties = difficulties)
236
  import shutil, os
237
  if self.path is not None:
238
  shutil.copyfile(self.path, 'BeatManipulator_TEMP/'+self.path.split('/')[-1])
239
  else: self.write('BeatManipulator_TEMP/audio.mp3')
240
  shutil.make_archive('BeatManipulator_TEMP', 'zip', 'BeatManipulator_TEMP')
241
- os.rename('BeatManipulator_TEMP.zip', _outputfilename('', self.path, '_'+self.hm, 'osz'))
 
 
 
242
  shutil.rmtree('BeatManipulator_TEMP')
 
243
 
244
  def autotrim(self):
245
  self._printlog(f'autotrimming; ')
@@ -261,7 +265,13 @@ class song:
261
  # get pattern size
262
  size=0
263
  #cut processing??? not worth it, it is really fast anyways
264
- pattern=pattern.replace(' ', '').split(sep)
 
 
 
 
 
 
265
  self._printlog(f"beatswapping with {' '.join(pattern)}; ")
266
  for j in pattern:
267
  s=''
@@ -738,7 +748,7 @@ def delete_beatmap(filename, lib='madmom.BeatDetectionProcessor'):
738
  print('Beatmap deleted.')
739
 
740
 
741
- def _tosong(audio, bmap, samplerate, log):
742
  from .wrapper import _song_copy
743
  if isinstance(audio, str) or audio is None: audio = song(audio, bmap=bmap, log = log)
744
  elif isinstance(audio, list) or isinstance(audio, numpy.ndarray) or isinstance(audio, tuple):
@@ -756,10 +766,18 @@ def beatswap(pattern: str, audio = None, scale: float = 1, shift: float = 0, out
756
  audio = _tosong(audio=audio, bmap=bmap, samplerate=samplerate, log=log)
757
  output = _outputfilename(output=output, filename=audio.path, suffix=suffix)
758
  audio.quick_beatswap(pattern = pattern, scale=scale, shift=shift, output=output)
 
759
 
760
  def generate_beat_image(audio = None, output='', samplerate = None, bmap = None, log = True, ext='png', maximum=4096):
761
  audio = _tosong(audio=audio, bmap=bmap, samplerate=samplerate, log=log)
762
  output = _outputfilename(output=output, filename=audio.path, ext=ext, suffix = '')
763
  audio.beatmap.generate()
764
  audio.beat_image.generate()
765
- audio.beat_image.write(output=output, maximum = maximum)
 
 
 
 
 
 
 
 
232
  # self.hitmap.generate(audio=self.audio, samplerate=self.samplerate, lib=lib, caching=self.caching, filename=self.filename)
233
 
234
  def generate_osu_beatmap(self, difficulties = [0.2, 0.1, 0.08, 0.06, 0.04, 0.02, 0.01, 0.005]):
235
+ self.hitmap.osu(difficulties = difficulties)
236
  import shutil, os
237
  if self.path is not None:
238
  shutil.copyfile(self.path, 'BeatManipulator_TEMP/'+self.path.split('/')[-1])
239
  else: self.write('BeatManipulator_TEMP/audio.mp3')
240
  shutil.make_archive('BeatManipulator_TEMP', 'zip', 'BeatManipulator_TEMP')
241
+ outputname = _outputfilename('', self.path, '_'+self.hitmap.hitlib, 'osz')
242
+ if not os.path.exists(outputname):
243
+ os.rename('BeatManipulator_TEMP.zip', outputname)
244
+ else: print(f'{outputname} already exists!')
245
  shutil.rmtree('BeatManipulator_TEMP')
246
+ self._printlog(f'Wrote {outputname}')
247
 
248
  def autotrim(self):
249
  self._printlog(f'autotrimming; ')
 
265
  # get pattern size
266
  size=0
267
  #cut processing??? not worth it, it is really fast anyways
268
+ if sep != ' ':
269
+ if sep not in pattern: pattern=pattern.replace(' ', sep) # separator not in patterm, e.g. forgot commas
270
+ while f'{sep}{sep}' in pattern: pattern = pattern.replace(f'{sep}{sep}', sep) # double separator
271
+ pattern=pattern.replace(' ', '').split(sep)
272
+ else:
273
+ while '  ' in pattern: pattern = pattern.replace('  ', ' ')
274
+ pattern=pattern.split(sep)
275
  self._printlog(f"beatswapping with {' '.join(pattern)}; ")
276
  for j in pattern:
277
  s=''
 
748
  print('Beatmap deleted.')
749
 
750
 
751
+ def _tosong(audio=None, bmap=None, samplerate=None, log=True):
752
  from .wrapper import _song_copy
753
  if isinstance(audio, str) or audio is None: audio = song(audio, bmap=bmap, log = log)
754
  elif isinstance(audio, list) or isinstance(audio, numpy.ndarray) or isinstance(audio, tuple):
 
766
  audio = _tosong(audio=audio, bmap=bmap, samplerate=samplerate, log=log)
767
  output = _outputfilename(output=output, filename=audio.path, suffix=suffix)
768
  audio.quick_beatswap(pattern = pattern, scale=scale, shift=shift, output=output)
769
+ return audio.path
770
 
771
  def generate_beat_image(audio = None, output='', samplerate = None, bmap = None, log = True, ext='png', maximum=4096):
772
  audio = _tosong(audio=audio, bmap=bmap, samplerate=samplerate, log=log)
773
  output = _outputfilename(output=output, filename=audio.path, ext=ext, suffix = '')
774
  audio.beatmap.generate()
775
  audio.beat_image.generate()
776
+ audio.beat_image.write(output=output, maximum = maximum)
777
+ return output
778
+
779
+ def generate_osu_map(audio = None, samplerate = None, log = True, difficulties = [0.2, 0.1, 0.08, 0.06, 0.04, 0.02, 0.01, 0.005]):
780
+ audio = _tosong(audio=audio, samplerate=samplerate, log=log)
781
+ audio.hitmap.generate()
782
+ audio.generate_osu_beatmap(difficulties=difficulties)
783
+ return audio.path
packages.txt CHANGED
@@ -1,2 +1,3 @@
1
  ffmpeg
2
- cython
 
 
1
  ffmpeg
2
+ cython
3
+ python3-opencv
requirements.txt CHANGED
@@ -8,4 +8,5 @@ soundfile
8
  ffmpeg-python
9
  librosa
10
  pedalboard
 
11
  git+https://github.com/CPJKU/madmom
 
8
  ffmpeg-python
9
  librosa
10
  pedalboard
11
+ opencv-python
12
  git+https://github.com/CPJKU/madmom