hhelesto committed
Commit b330f19 · verified · 1 Parent(s): ff73cf8

Create app.py

Files changed (1): app.py +57 −0
app.py ADDED
@@ -0,0 +1,57 @@
import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel
from flask import Flask, request, jsonify, render_template

# --- Load Model & Tokenizer ---

base_model_name = "unsloth/llama-3.2-3b-bnb-4bit"
adapter_model_name = "aismaanly/ai_synthetic"

# 4-bit NF4 quantization with double quantization; computation runs in bfloat16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

print("Loading base model...")
model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    quantization_config=bnb_config,
    device_map="auto",
)

print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

print("Loading PEFT adapter...")
model = PeftModel.from_pretrained(model, adapter_model_name)
# Fold the LoRA adapter weights into the base model so inference uses a single model.
model = model.merge_and_unload()
print("Model ready!")

# --- Flask App ---

app = Flask(__name__)

@app.route("/")
def index():
    return render_template("index.html")

@app.route("/generate", methods=["POST"])
def generate():
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt")
    if not prompt:
        # Guard against a missing or empty prompt instead of crashing the tokenizer.
        return jsonify({"error": "Missing 'prompt' in request body"}), 400

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.inference_mode():
        outputs = model.generate(**inputs, max_new_tokens=100)
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return jsonify({
        "generated_text": text
    })

if __name__ == "__main__":
    port = int(os.environ.get("PORT", 7860))
    app.run(host="0.0.0.0", port=port)
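For a quick smoke test of the /generate endpoint, a minimal client sketch: it assumes the app is running locally on the code's default port 7860, and it uses the `requests` library, which is a client-side extra rather than a dependency of app.py itself.

import requests  # client-side dependency for this test, not used by app.py

# POST a prompt to the running Flask app; app.py defaults to port 7860.
resp = requests.post(
    "http://localhost:7860/generate",
    json={"prompt": "Write one sentence about quantized LLM inference."},
    timeout=120,  # the first request can be slow while the model warms up
)
resp.raise_for_status()
print(resp.json()["generated_text"])

Note that the endpoint decodes the full generated sequence, so the response text includes the prompt itself unless the client strips it off.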