aquibmoin committed on
Commit
927f7a3
1 Parent(s): 4295e2c

Update app.py

Files changed (1)
  app.py +7 -6
app.py CHANGED
@@ -2,8 +2,8 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModel, GPT2LMHeadModel, GPT2Tokenizer
 import torch
 
-# Load the bi-encoder model and tokenizer
-bi_encoder_model_name = "sentence-transformers/all-MiniLM-L6-v2"
+# Load the NASA-specific bi-encoder model and tokenizer
+bi_encoder_model_name = "nasa-impact/nasa-smd-ibm-st-v2"
 bi_tokenizer = AutoTokenizer.from_pretrained(bi_encoder_model_name)
 bi_model = AutoModel.from_pretrained(bi_encoder_model_name)
 
@@ -19,8 +19,8 @@ def encode_text(text):
     return outputs.last_hidden_state.mean(dim=1).detach().numpy()
 
 def generate_response(user_input, context_embedding):
-    # Combine user input with context embedding for GPT-2 input
-    combined_input = user_input + " " + context_embedding
+    # Create a structured prompt for GPT-2
+    combined_input = f"Question: {user_input}\nContext: {context_embedding}\nAnswer:"
 
     # Generate a response using GPT-2 with adjusted parameters
     gpt2_inputs = gpt2_tokenizer.encode(combined_input, return_tensors='pt')
@@ -28,7 +28,7 @@ def generate_response(user_input, context_embedding):
         gpt2_inputs,
         max_length=150,
         num_return_sequences=1,
-        temperature=0.5,
+        temperature=0.7,
         top_p=0.9,
         repetition_penalty=1.2
     )
@@ -47,7 +47,7 @@ iface = gr.Interface(
     inputs=[gr.Textbox(lines=2, placeholder="Enter your message here..."), gr.Textbox(lines=2, placeholder="Enter context here (optional)...")],
     outputs="text",
     title="Context-Aware Dynamic Response Chatbot",
-    description="A chatbot using a bi-encoder model to understand the input context and GPT-2 to generate dynamic responses."
+    description="A chatbot using a NASA-specific bi-encoder model to understand the input context and GPT-2 to generate dynamic responses."
 )
 
 # Launch the interface
@@ -56,3 +56,4 @@ iface.launch()
 
 
 
+
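For reference, a minimal end-to-end sketch of the updated pipeline: it loads the NASA bi-encoder named in this commit, mean-pools a context embedding, and generates with the new prompt format and sampling parameters (temperature 0.7, top_p 0.9, repetition_penalty 1.2). The example question/context strings, the "gpt2" checkpoint name, and the do_sample/pad_token_id arguments are illustrative assumptions, not part of the committed app.py; for readability the prompt below interpolates the context text, whereas the committed code interpolates the embedding itself.

import torch
from transformers import AutoTokenizer, AutoModel, GPT2LMHeadModel, GPT2Tokenizer

# Bi-encoder from this commit; the GPT-2 checkpoint name is an assumption
bi_encoder_model_name = "nasa-impact/nasa-smd-ibm-st-v2"
bi_tokenizer = AutoTokenizer.from_pretrained(bi_encoder_model_name)
bi_model = AutoModel.from_pretrained(bi_encoder_model_name)
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")

def encode_text(text):
    # Mean-pool the last hidden state into a single context embedding
    inputs = bi_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = bi_model(**inputs)
    return outputs.last_hidden_state.mean(dim=1).detach().numpy()

user_input = "Which NASA telescope first detected an exoplanet atmosphere?"
context = "Hubble made the first detection of an exoplanet atmosphere, around HD 209458 b."
context_embedding = encode_text(context)  # embedding of the context text

# Structured prompt introduced in this commit (context text used here for
# readability; the committed code interpolates context_embedding directly)
combined_input = f"Question: {user_input}\nContext: {context}\nAnswer:"

gpt2_inputs = gpt2_tokenizer.encode(combined_input, return_tensors="pt")
gpt2_outputs = gpt2_model.generate(
    gpt2_inputs,
    max_length=150,
    num_return_sequences=1,
    temperature=0.7,
    top_p=0.9,
    repetition_penalty=1.2,
    do_sample=True,  # temperature/top_p only apply when sampling (not in the diff)
    pad_token_id=gpt2_tokenizer.eos_token_id,
)
print(gpt2_tokenizer.decode(gpt2_outputs[0], skip_special_tokens=True))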