cheberle committed on
Commit
d154bbf
·
1 Parent(s): 29eca75
Files changed (1) hide show
  1. app.py +15 -12
app.py CHANGED
@@ -25,29 +25,32 @@ base_model = AutoModelForCausalLM.from_pretrained(
25
  model = PeftModel.from_pretrained(
26
  base_model,
27
  ADAPTER_REPO,
28
- ignore_mismatched_sizes=True, # Add this parameter
29
  )
30
 
31
- def classify_text(text):
32
  """
33
- Simple prompting approach: we ask the model to return a single classification label
34
- (e.g., 'positive', 'negative', etc.).
35
- You can refine this prompt, add chain-of-thought, or multiple classes as needed.
36
  """
37
- prompt = f"Below is some text.\nText: {text}\nPlease classify the sentiment (positive or negative):"
38
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
39
  with torch.no_grad():
40
- outputs = model.generate(**inputs, max_new_tokens=64)
 
 
 
 
 
41
  answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
42
  return answer
43
 
44
  with gr.Blocks() as demo:
45
- gr.Markdown("## Qwen + LoRA Adapter: Text Classification Demo")
46
- input_box = gr.Textbox(lines=3, label="Enter text")
47
- output_box = gr.Textbox(lines=3, label="Model's generated output (classification)")
48
 
49
- classify_btn = gr.Button("Classify")
50
- classify_btn.click(fn=classify_text, inputs=input_box, outputs=output_box)
51
 
52
  if __name__ == "__main__":
53
  demo.launch()
 
25
  model = PeftModel.from_pretrained(
26
  base_model,
27
  ADAPTER_REPO,
28
+ ignore_mismatched_sizes=True,
29
  )
30
 
31
+ def classify_food(text):
32
  """
33
+ Classify or extract food-related terms from the input text.
 
 
34
  """
35
+ prompt = f"Below is some text. Please identify and classify food-related terms.\nText: {text}\nFood classification or extraction:"
36
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
37
  with torch.no_grad():
38
+ outputs = model.generate(
39
+ **inputs,
40
+ max_new_tokens=64,
41
+ temperature=0.7, # Adjust temperature for creativity
42
+ top_p=0.9, # Adjust top_p for diversity
43
+ )
44
  answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
45
  return answer
46
 
47
  with gr.Blocks() as demo:
48
+ gr.Markdown("## Qwen + LoRA Adapter: Food Classification/Extraction Demo")
49
+ input_box = gr.Textbox(lines=3, label="Enter text containing food items")
50
+ output_box = gr.Textbox(lines=3, label="Model's classification or extraction output")
51
 
52
+ classify_btn = gr.Button("Analyze Food Terms")
53
+ classify_btn.click(fn=classify_food, inputs=input_box, outputs=output_box)
54
 
55
  if __name__ == "__main__":
56
  demo.launch()