juancopi81 committed
Commit 7c7327d (1 parent: d0d8765)

Add model and tokenizer in utils

Files changed (2):
  1. main.py  (+1, -1)
  2. utils.py (+11, -1)
main.py CHANGED
@@ -73,7 +73,7 @@ def run():
         text_sequence = gr.Text()
         empty_sequence = gr.Text(visible=False)
         with gr.Row():
-            num_tokens = gr.Text()
+            num_tokens = gr.Text(visible=False)
         btn_from_scratch.click(
             fn=generate_song,
             inputs=[genre, temp, empty_sequence, qpm],
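
For context, setting `visible=False` keeps a Gradio component in the Blocks graph so it can still be wired into event handlers; it just stops being rendered, which is presumably why `num_tokens` is hidden here rather than removed. The minimal sketch below illustrates the pattern; the `count_tokens` helper, labels, and button are made-up stand-ins, not code from this Space:

```python
import gradio as gr


def count_tokens(text: str) -> str:
    # Stand-in for the app's real logic: report a whitespace token count.
    return str(len(text.split()))


with gr.Blocks() as demo:
    text_sequence = gr.Text(label="Generated sequence")
    # Hidden component: not rendered, but still usable as an event output.
    num_tokens = gr.Text(visible=False)
    btn = gr.Button("Count tokens")
    btn.click(fn=count_tokens, inputs=[text_sequence], outputs=[num_tokens])

if __name__ == "__main__":
    demo.launch()
```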
utils.py CHANGED
@@ -5,13 +5,23 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import note_seq
 from matplotlib.figure import Figure
 from numpy import ndarray
+import torch
 
 from constants import GM_INSTRUMENTS, SAMPLE_RATE
 from string_to_notes import token_sequence_to_note_sequence
 from model import get_model_and_tokenizer
 
 
-model, tokenizer = get_model_and_tokenizer()
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Load the tokenizer and the model
+tokenizer = AutoTokenizer.from_pretrained("juancopi81/lmd_8bars_tokenizer")
+model = AutoModelForCausalLM.from_pretrained(
+    "juancopi81/lmd-8bars-2048-epochs20_v3"
+)
+
+# Move model to device
+model = model.to(device)
 
 
 def create_seed_string(genre: str = "OTHER") -> str:
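
For reference, here is a minimal sketch of how a tokenizer/model pair loaded at module level like this is typically used to generate a token sequence. The checkpoint names and device handling come from the diff above; the `generate_token_sequence` function and its sampling settings are illustrative assumptions, not code from this repository:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Same loading pattern as in utils.py above.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained("juancopi81/lmd_8bars_tokenizer")
model = AutoModelForCausalLM.from_pretrained(
    "juancopi81/lmd-8bars-2048-epochs20_v3"
).to(device)


def generate_token_sequence(seed: str, max_new_tokens: int = 256) -> str:
    # Encode the seed, run sampling on the model's device, and decode the result.
    inputs = tokenizer(seed, return_tensors="pt").to(device)
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=0.85,
        )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
```

Note that the new utils.py still imports `get_model_and_tokenizer` from `model` even though it is no longer called at module level.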