import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "norman-codes/transfer-learning-attempt1"


# Load the model and tokenizer from Hugging Face Hub once and cache them,
# so they are not reloaded on every Streamlit rerun.
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model


tokenizer, model = load_model()

st.title("Text Generation with GPT-Neo")
st.write("This is a text generation model running on Hugging Face Spaces. Enter a prompt to generate text.")

prompt = st.text_input("Enter your prompt here:")

if st.button("Generate Text"):
    with st.spinner("Generating..."):
        # Encode the input prompt and generate a continuation
        input_ids = tokenizer(prompt, return_tensors="pt", add_special_tokens=True).input_ids
        generated_ids = model.generate(
            input_ids,
            max_length=100,
            pad_token_id=tokenizer.eos_token_id,  # silences the missing-pad-token warning on GPT-style models
        )
        generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        st.text_area("Generated Text:", value=generated_text, height=200)