Mr-Vicky-01 committed on
Commit fe2b97a
1 Parent(s): a4ba5b9

Create app.py

Files changed (1)
  1. app.py +29 -0
app.py ADDED
@@ -0,0 +1,29 @@
+ import torch
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("Mr-Vicky-01/Gemma-2B-Finetuined-pythonCode")
+ model = AutoModelForCausalLM.from_pretrained("Mr-Vicky-01/Gemma-2B-Finetuined-pythonCode")
+
+ def generate_code(text):
+     # Prompt wording kept as committed; it mirrors the chat format the model was fine-tuned on.
+     prompt = f"""
+ <start_of_turn>user based on given instruction create a solution\n\nhere are the instruction {text}
+ <end_of_turn>\n<start_of_turn>model
+ """
+     encodeds = tokenizer(prompt, return_tensors="pt", add_special_tokens=True).input_ids
+
+     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+     model.to(device)
+     inputs = encodeds.to(device)
+
+     # Increase max_new_tokens if needed
+     generated_ids = model.generate(inputs, max_new_tokens=500, do_sample=False, pad_token_id=tokenizer.eos_token_id)
+     decoded = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+
+     # Keep the text up to the second <end_of_turn> marker, then return what follows the "model" turn.
+     ans = ''.join(decoded.split('<end_of_turn>')[:2])
+     return ans.split("model")[1].strip()
+
+ demo = gr.Interface(fn=generate_code, inputs='text', outputs='text', title='Python Code Generation')
+ demo.launch(debug=True, share=True)
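
Note: as a quick sanity check before launching the Gradio UI, the handler can be called directly in a Python session (the instruction string below is only an illustrative example, not part of the commit):

    print(generate_code("write a python function to reverse a string"))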