import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Specify the model name from the Hugging Face Hub
model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # Replace with your model if needed

# Load the tokenizer.
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Determine whether a GPU is available.
device = "cuda" if torch.cuda.is_available() else "cpu"

if device == "cuda":
    # Load the model in half precision for GPU inference.
    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
    model.to(device)
else:
    # Load the model normally on CPU.
    model = AutoModelForCausalLM.from_pretrained(model_name)


def generate_recommendation(characteristics: str) -> str:
    """Generate personalized diet recommendations from the given characteristics."""
    prompt = (
        "Based on the following characteristics, provide personalized diet recommendations:\n\n"
        f"{characteristics}\n\n"
        "Diet Recommendations:"
    )
    try:
        # Prepare inputs and move them to the appropriate device.
        inputs = tokenizer(prompt, return_tensors="pt")
        inputs = {key: value.to(device) for key, value in inputs.items()}

        # Generate text. max_new_tokens bounds only the generated continuation;
        # the original max_length would also have counted the prompt tokens.
        output_ids = model.generate(**inputs, max_new_tokens=512, num_return_sequences=1)

        # Decode only the newly generated tokens so the prompt is not echoed
        # back in the response.
        prompt_length = inputs["input_ids"].shape[1]
        recommendation = tokenizer.decode(output_ids[0][prompt_length:], skip_special_tokens=True)
        return recommendation
    except Exception as e:
        return f"Error: {e}"


# Build the Gradio Blocks interface.
with gr.Blocks() as demo:
    gr.Markdown("# AI Diet Recommendations")
    gr.Markdown("Get personalized diet recommendations generated by our AI.")
    with gr.Row():
        with gr.Column():
            characteristics_input = gr.Textbox(
                lines=10,
                placeholder="Enter your characteristics here...",
                label="Your Characteristics",
            )
            submit_btn = gr.Button("Submit")
        with gr.Column():
            result_output = gr.Textbox(label="Diet Recommendations")

    # A progress indicator is shown while the function executes
    # (recent Gradio versions take a string here rather than a bool).
    submit_btn.click(
        fn=generate_recommendation,
        inputs=characteristics_input,
        outputs=result_output,
        show_progress="full",
    )

if __name__ == "__main__":
    demo.launch()
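
# --- Usage sketch (illustrative, not part of the original app) ---------------
# A minimal way to smoke-test generate_recommendation() without the UI. The
# sample characteristics below are made-up example values, not data from the
# source:
#
#     sample = "Age: 35, Weight: 80 kg, Height: 178 cm, Activity level: sedentary, Goal: lose weight"
#     print(generate_recommendation(sample))
#
# When the script itself is run, demo.launch() prints a local URL (by default
# http://127.0.0.1:7860); open it in a browser, enter your characteristics,
# and click Submit.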