import os

from transformers import Tool, pipeline


class TextGenerationTool(Tool):
    name = "text_generator"
    description = (
        "This is a tool for text generation. It takes a prompt as input "
        "and returns the generated text."
    )
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, prompt: str):
        # Read the Hugging Face access token from the environment.
        token = os.environ["HF_token"]

        # Build a text-generation pipeline for the chosen model.
        # (An earlier revision used "lgaalves/gpt2-dolly"; swap the model
        # id here to change backends.)
        text_generator = pipeline(
            model="microsoft/Orca-2-13b",
            token=token,
        )

        # Generate a single completion for the prompt. Sampling must be
        # enabled for temperature to take effect.
        generated_text = text_generator(
            prompt,
            max_length=500,
            num_return_sequences=1,
            do_sample=True,
            temperature=0.7,
        )

        print(generated_text)
        return generated_text
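

# A minimal usage sketch, assuming HF_token is set in the environment and
# the model weights are downloadable; the prompt below is a hypothetical
# example, and running Orca-2-13b requires substantial GPU memory.
if __name__ == "__main__":
    tool = TextGenerationTool()
    print(tool("Write a short poem about the sea."))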