artificialguybr committed on
Commit
5e45ef8
1 Parent(s): 1e1d43c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -13
app.py CHANGED
@@ -16,18 +16,6 @@ model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
16
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
17
  config = MistralConfig()
18
 
19
- # Postprocess function
20
- def postprocess(self, y):
21
- if y is None:
22
- return []
23
- for i, (message, response) in enumerate(y):
24
- y[i] = (
25
- None if message is None else mdtex2html.convert(message),
26
- None if response is None else mdtex2html.convert(response),
27
- )
28
- return y
29
-
30
- gr.Chatbot.postprocess = postprocess
31
  # Text parsing function
32
  def _parse_text(text):
33
  lines = text.split("\n")
@@ -70,7 +58,7 @@ def _launch_demo(args, model, tokenizer, config):
70
  input_ids = tokenizer.encode(_query, return_tensors='pt')
71
 
72
  # Generate a response using the model
73
- generated_ids = model.generate(input_ids, max_length=100)
74
 
75
  # Decode the generated IDs to text
76
  full_response = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
 
16
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
17
  config = MistralConfig()
18
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  # Text parsing function
20
  def _parse_text(text):
21
  lines = text.split("\n")
 
58
  input_ids = tokenizer.encode(_query, return_tensors='pt')
59
 
60
  # Generate a response using the model
61
+ generated_ids = model.generate(input_ids, max_length=300)
62
 
63
  # Decode the generated IDs to text
64
  full_response = tokenizer.decode(generated_ids[0], skip_special_tokens=True)