Update app.py
app.py CHANGED
@@ -14,23 +14,22 @@ client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")
 
 client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")
 
-def greet(
+def greet(prompt):
     response = ""
     for token in client.text_generation(
-
+        prompt,
         stream=True,
-        max_new_tokens=
+        max_new_tokens=128,
         temperature=0.7,
         top_p=0.95
     ):
-        print(token)
         response += token
         yield response
 
 demo = gr.Interface(
     fn=greet,
     inputs=[
-        gr.Textbox(label="
+        gr.Textbox(label="prompt", value="The huggingface_hub library is ")
     ],
     outputs=[gr.Textbox(label="result", lines=3)],
     api_name="generate",
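For context, a minimal sketch of what the full app.py might look like after this commit is shown below. The import statements and the final demo.launch() call are assumptions, since the diff does not show them; everything else mirrors the changed hunk above.

import gradio as gr
from huggingface_hub import InferenceClient

# Hosted inference client for the Qwen coder model used by the Space
client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")

def greet(prompt):
    response = ""
    # Stream generated tokens and yield the growing text so the UI updates live
    for token in client.text_generation(
        prompt,
        stream=True,
        max_new_tokens=128,
        temperature=0.7,
        top_p=0.95
    ):
        response += token
        yield response

demo = gr.Interface(
    fn=greet,
    inputs=[
        gr.Textbox(label="prompt", value="The huggingface_hub library is ")
    ],
    outputs=[gr.Textbox(label="result", lines=3)],
    api_name="generate",
)

demo.launch()  # assumption: not shown in the diff

Because the interface sets api_name="generate", the endpoint can also be called programmatically, for example with gradio_client (the Space id "user/space-name" below is a hypothetical placeholder):

from gradio_client import Client

api = Client("user/space-name")  # hypothetical Space id
result = api.predict("The huggingface_hub library is ", api_name="/generate")
print(result)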