"""Gradio demo for the `rish13/polymers` text-generation model.

Loads the model once at module import (on GPU when available) and exposes
a single-textbox web UI that returns the model's continuation of a prompt.
"""

import gradio as gr
import torch
from transformers import pipeline

# transformers' pipeline device convention: 0 = first CUDA device, -1 = CPU.
device = 0 if torch.cuda.is_available() else -1

# Load the text-generation pipeline once; reused for every request.
model = pipeline(
    "text-generation",
    model="rish13/polymers",
    device=device,
)


def generate_response(prompt: str) -> str:
    """Generate a short model continuation for *prompt*.

    Args:
        prompt: Free-form user text to continue.

    Returns:
        The generated text (the pipeline's output includes the prompt
        followed by the continuation).
    """
    response = model(
        prompt,
        # max_new_tokens caps only the *generated* tokens, so long prompts
        # still get output (max_length would count the prompt too).
        max_new_tokens=70,
        num_return_sequences=1,
        # Sampling must be enabled explicitly; otherwise temperature/top_k/
        # top_p are ignored on models whose config defaults to greedy search.
        do_sample=True,
        temperature=0.6,  # < 1.0: sharper, less random output
        top_k=100,        # sample from the 100 most likely next tokens
        top_p=0.95,       # nucleus sampling: 95% cumulative probability mass
    )
    return response[0]["generated_text"]


# Define the Gradio interface.
interface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt"),
    outputs="text",
    title="Polymer Knowledge Model",
    description="A model fine-tuned for generating text related to polymers.",
)

# Guard the launch so importing this module doesn't start the web server.
if __name__ == "__main__":
    interface.launch()