Alhdrawi committed on
Commit
754f31f
·
verified ·
1 Parent(s): ac26792

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -19
app.py CHANGED
@@ -1,14 +1,28 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
3
 
4
- # تحميل النموذج والتوكن
 
5
  model_id = "Alhdrawi/R-Ray-Ai-model"
6
-
7
- tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
8
- model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
9
-
10
- # إنشاء pipeline
11
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  def respond(message, history, system_message, max_tokens, temperature, top_p):
14
  # تجهيز البرومبت الكامل
@@ -17,17 +31,16 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
17
  full_prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
18
  full_prompt += f"User: {message}\nAssistant:"
19
 
20
- # التوليد
21
- result = pipe(
22
- full_prompt,
23
- max_new_tokens=max_tokens,
24
- temperature=temperature,
25
- top_p=top_p,
26
- do_sample=True,
27
- )
28
-
29
- output = result[0]["generated_text"].split("Assistant:")[-1].strip()
30
- return output
31
 
32
  # واجهة Gradio
33
  demo = gr.ChatInterface(
 
1
  import gradio as gr
2
+ import requests
3
+ import os
4
 
5
# Hugging Face Inference API configuration.
# The access token is read from the environment — set HF_API_TOKEN in the
# Space secrets (never hardcode it in the source).
HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
model_id = "Alhdrawi/R-Ray-Ai-model"
API_URL = f"https://api-inference.huggingface.co/models/{model_id}"

# Authorization header sent with every inference request.
# NOTE(review): if HF_API_TOKEN is unset this becomes "Bearer None" and the
# API will reject requests — confirm the secret is configured on deploy.
headers = {
    "Authorization": f"Bearer {HF_API_TOKEN}"
}
13
def query_huggingface_api(prompt, max_tokens, temperature, top_p):
    """Send a text-generation request to the Hugging Face Inference API.

    Args:
        prompt: Full prompt string (system message + chat history + new turn).
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The decoded JSON response — on success a list of dicts containing
        "generated_text"; on failure an error dict (the caller handles both).
    """
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "do_sample": True
        }
    }
    # A timeout keeps a stalled API call from hanging the Gradio worker
    # forever; cold model loads can be slow, so allow a generous window.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    return response.json()
26
 
27
  def respond(message, history, system_message, max_tokens, temperature, top_p):
28
  # تجهيز البرومبت الكامل
 
31
  full_prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
32
  full_prompt += f"User: {message}\nAssistant:"
33
 
34
+ # استدعاء API
35
+ result = query_huggingface_api(full_prompt, max_tokens, temperature, top_p)
36
+
37
+ # استخراج الرد من الناتج
38
+ try:
39
+ output_text = result[0]["generated_text"]
40
+ final_response = output_text.split("Assistant:")[-1].strip()
41
+ return final_response
42
+ except Exception as e:
43
+ return f"❌ Error: {str(result)}"
 
44
 
45
  # واجهة Gradio
46
  demo = gr.ChatInterface(