haielab committed
Commit bc4f9eb · verified · 1 Parent(s): fa49e9c

Update app.py

Files changed (1)
  1. app.py +52 -25
app.py CHANGED
@@ -2,39 +2,66 @@ import gradio as gr
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# Load tokenizer and model
-model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # replace with actual model name or path
+# Specify the model name from HuggingFace Hub
+model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # Replace with your model if needed
+
+# Load the tokenizer.
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-if torch.cuda.is_available():
-    # Load model in half precision and move to GPU for faster inference
+# Determine whether a GPU is available
+device = "cuda" if torch.cuda.is_available() else "cpu"
+if device == "cuda":
+    # Load the model in half precision for GPU inference
     model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
-    model.to("cuda")
+    model.to(device)
 else:
-    # Load model on CPU (full precision by default)
+    # Load the model normally on CPU
     model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Define the text generation function
-def generate_text(prompt):
-    # Tokenize input and move to appropriate device
-    inputs = tokenizer(prompt, return_tensors="pt")
-    if torch.cuda.is_available():
-        inputs = {key: value.to("cuda") for key, value in inputs.items()}
-    # Generate text
-    output_ids = model.generate(**inputs, max_length=200, do_sample=True, temperature=0.7)
-    # Decode the generated tokens to text
-    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
-    return generated_text
+def generate_recommendation(characteristics: str) -> str:
+    """
+    Generates personalized diet recommendations based on input characteristics.
+    """
+    prompt = (
+        f"Based on the following characteristics, provide personalized diet recommendations:\n\n"
+        f"{characteristics}\n\n"
+        f"Diet Recommendations:"
+    )
+    try:
+        # Prepare inputs and move them to the appropriate device
+        inputs = tokenizer(prompt, return_tensors="pt")
+        inputs = {key: value.to(device) for key, value in inputs.items()}
+
+        # Generate text
+        output_ids = model.generate(**inputs, max_length=512, num_return_sequences=1)
+        recommendation = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+        return recommendation
+    except Exception as e:
+        return f"Error: {str(e)}"
 
-# Set up Gradio interface
+# Build the Gradio Blocks interface.
 with gr.Blocks() as demo:
-    gr.Markdown("## Text Generation Demo")  # title or description (optional)
-    user_input = gr.Textbox(label="Enter your prompt:")
-    output_text = gr.Textbox(label="Generated output:")
-    generate_btn = gr.Button("Generate")
-    # When button is clicked, call generate_text with user_input and show in output_text
-    generate_btn.click(fn=generate_text, inputs=user_input, outputs=output_text)
+    gr.Markdown("# AI Diet Recommendations")
+    gr.Markdown("Get personalized diet recommendations generated by our AI.")
+
+    with gr.Row():
+        with gr.Column():
+            characteristics_input = gr.Textbox(
+                lines=10,
+                placeholder="Enter your characteristics here...",
+                label="Your Characteristics"
+            )
+            submit_btn = gr.Button("Submit")
+        with gr.Column():
+            result_output = gr.Textbox(label="Diet Recommendations")
+
+    # The waiting spinner will be visible while the function executes.
+    submit_btn.click(
+        fn=generate_recommendation,
+        inputs=characteristics_input,
+        outputs=result_output,
+        show_progress=True
+    )
 
-# Launch the app (if running as a standalone script)
 if __name__ == "__main__":
     demo.launch()
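
As a quick sanity check outside Gradio, the new handler can be exercised directly; the sketch below is not part of this commit and assumes app.py sits in the working directory, with a made-up sample input. Importing app loads the tokenizer and model at module level but does not start the UI, since demo.launch() is behind the __main__ guard.

    # Sanity-check sketch (an illustration, not part of this commit).
    from app import generate_recommendation

    # Hypothetical example input; any free-form description works.
    sample = "Age: 34, weight: 82 kg, vegetarian, trains three times per week"
    print(generate_recommendation(sample))

One thing to watch: max_length=512 in the new generate() call counts the prompt tokens as well as the output, so a very long characteristics list leaves little room for the recommendation; max_new_tokens is the usual way to reserve a fixed budget for generated tokens.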