"""Gradio demo that turns natural-language questions into SQL queries with a T5 model."""

import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration
from datasets import load_dataset  # NOTE(review): unused in this file — remove if nothing else needs it

# Load the tokenizer and model once at import time so every request reuses them.
# NOTE(review): the stock 't5-small' checkpoint is NOT fine-tuned for SQL
# generation — swap in a fine-tuned checkpoint for meaningful output.
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = T5ForConditionalGeneration.from_pretrained('t5-small')


def generate_sql(question):
    """Generate a SQL query string from a natural-language question.

    Args:
        question: The user's question in plain English.

    Returns:
        The model's decoded output with special tokens stripped — intended
        to be a SQL query when a SQL-fine-tuned checkpoint is loaded.
    """
    # Format the question for the model if needed. For example:
    # input_text = f"translate English to SQL: {question}"
    input_text = f"{question}"  # Directly use the question if the model is fine-tuned for SQL generation

    # Tokenize, truncating to the model's 512-token limit so oversized
    # inputs cannot overflow the encoder's position embeddings.
    input_ids = tokenizer.encode(
        input_text,
        return_tensors="pt",
        truncation=True,
        max_length=512,
    )

    # Beam search (5 beams) for a single best output sequence.
    output_ids = model.generate(input_ids, max_length=512, num_beams=5)[0]

    # Decode the generated ids to get the SQL query text.
    sql_query = tokenizer.decode(output_ids, skip_special_tokens=True)
    return sql_query


# Define the Gradio interface.
iface = gr.Interface(
    fn=generate_sql,
    inputs=gr.Textbox(lines=2, placeholder="Enter your question here..."),
    outputs=gr.Textbox(),
    title="Natural Language to SQL",
    description="This app uses a Seq2Seq model to generate SQL queries from natural language questions."
)

# Launch the app only when run as a script (not on import).
if __name__ == "__main__":
    iface.launch()