bushcoding committed
Commit e020deb (verified)
Parent: 5919d27

Update app.py

Files changed (1): app.py (+25 −32)
app.py CHANGED
@@ -1,8 +1,19 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from accelerate import init_empty_weights, infer_auto_device_map
+import torch
 
-# Initialize the client for the Hugging Face API
-client = InferenceClient(model="bushai/sar-i-7b")
+# Model name on the Hugging Face Hub
+model_name = "bushai/sar-i-7b"
+
+# Initialize an empty model to save memory
+with init_empty_weights():
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+
+# Define how to distribute the model between CPU and GPU
+device_map = infer_auto_device_map(model, max_memory={0: "14GB", "cpu": "2GB"})
+model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 def respond(
     message,
@@ -12,43 +23,25 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    # Call the Hugging Face API to generate the response
-    response = ""
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-        response += token
-        yield response
+    # Tokenize the user input
+    inputs = tokenizer(message, return_tensors="pt").to(model.device)
+
+    # Generate the model's response
+    outputs = model.generate(inputs['input_ids'], max_new_tokens=max_tokens, temperature=temperature, top_p=top_p)
+
+    # Decode the generated output
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    return response
 
 # Configure the Gradio interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Textbox(value="You are a friendly chatbot.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
     ],
 )
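A side note on the device-mapping step: accelerate's documented pattern builds the empty model from its config alone, so no checkpoint weights are read just to measure layer sizes; calling from_pretrained inside init_empty_weights, as the new code does, works but pulls the full checkpoint only to discard it. A minimal sketch of the config-only variant, reusing the model_name and memory budget from app.py:

from accelerate import init_empty_weights, infer_auto_device_map
from transformers import AutoConfig, AutoModelForCausalLM

model_name = "bushai/sar-i-7b"

# Build a weightless skeleton from the config so layer shapes can be measured
config = AutoConfig.from_pretrained(model_name)
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

# Fit the layers into 14 GB on GPU 0 plus 2 GB of CPU RAM, as in the commit
device_map = infer_auto_device_map(empty_model, max_memory={0: "14GB", "cpu": "2GB"})

# Load the real weights once, already dispatched according to the map
model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map)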
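On the generation call itself: transformers' generate() runs greedy decoding by default, so the temperature and top_p arguments only take effect once do_sample=True is passed (the library warns about this at runtime), and decoding outputs[0] in full returns the prompt echoed back along with the reply. A sketch of the generation step with both points addressed, assuming the model and tokenizer objects from app.py are in scope (generate_reply is a hypothetical helper name):

import torch

def generate_reply(message, max_tokens, temperature, top_p):
    # Tokenize the user input and move it to the model's device
    inputs = tokenizer(message, return_tensors="pt").to(model.device)

    with torch.no_grad():  # inference only, no gradients needed
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],  # avoids the missing-mask warning
            max_new_tokens=max_tokens,
            do_sample=True,  # required for temperature/top_p to take effect
            temperature=temperature,
            top_p=top_p,
        )

    # Decode only the newly generated tokens, skipping the echoed prompt
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)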
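Finally, the rewritten respond() only encodes the latest message, while gr.ChatInterface still passes the chat history and the system-message textbox through; the old InferenceClient version threaded both into the request. One way to fold them back into a plain-text prompt, sketched under the assumption that the model has no chat template (build_prompt is a hypothetical helper, not part of the commit):

def build_prompt(system_message, history, message):
    # Flatten the Gradio chat state into a single text prompt;
    # history arrives as (user, assistant) pairs, as the old code assumed
    lines = [system_message]
    for user_turn, bot_turn in history:
        if user_turn:
            lines.append(f"User: {user_turn}")
        if bot_turn:
            lines.append(f"Assistant: {bot_turn}")
    lines.append(f"User: {message}")
    lines.append("Assistant:")  # cue the model to answer as the assistant
    return "\n".join(lines)

The flattened prompt would then replace message in the tokenizer call inside respond().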