import gradio as gr
from transformers import pipeline
from huggingface_hub import InferenceClient  # used by initClient() below
# An earlier image-classification demo, kept commented out for reference:
# modelName = "chenluuli/test-text-vis"
# pipeline = pipeline(task="image-classification", model="chenluuli/test-text-vis")
#
# def predict(input_img):
#     predictions = pipeline(input_img)
#     return input_img, {p["label"]: p["score"] for p in predictions}
#
# gradio_app = gr.Interface(
#     predict,
#     inputs="text",
#     outputs="text",
#     title="demo",
# )
#
# if __name__ == "__main__":
#     gradio_app.launch()
token = "" # todo 支持外部传入
def initClient():
    # Initialize an Inference API client for a specific model
    client = InferenceClient(
        model="prompthero/openjourney-v4",
        # base_url=...,
        # api_key=...,
    )
    return client
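# initClient() is defined but never called in this app. As a sketch (assuming a
# valid HF token is configured), the client could generate an image like so:
#
#     client = initClient()
#     image = client.text_to_image("a clean bar chart illustration")  # returns a PIL Image
#     image.save("chart.png")
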
# Load the chat pipeline once at import time rather than on every request.
text2text_generator = pipeline(
    "text-generation",
    model="Qwen/Qwen2.5-0.5B-Instruct",
    torch_dtype="auto",
    device_map="auto",
)

def greet(user_input):
    # modelName = "chenluuli/test-text-vis"  # unused; generation uses Qwen above
    prompt = (
        "## You are a visualization expert. Based on the information I provide, "
        "recommend a reasonable chart configuration. ## Given this information, "
        "return a reasonable chart type. >> My input data is as follows: "
    )
    messages = [{
        "role": "user",
        "content": prompt + user_input,
    }]
    response = text2text_generator(
        messages,
        max_new_tokens=512,  # cap new tokens; max_length would also count the prompt
    )
    # Chat-style input returns the whole conversation; the reply is the last message.
    reply = response[0]["generated_text"][-1]["content"]
    print(reply)
    return reply
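# Quick local sanity check (hypothetical input; assumes the model weights
# download successfully on first run):
#
#     print(greet("columns: month (string), revenue (number); 12 rows"))
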
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()
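
# On Hugging Face Spaces, demo.launch() with no arguments is sufficient; when
# running locally, demo.launch(share=True) also creates a temporary public URL.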
# title = "demo"
# description = "Gradio Demo for custom demo"
# # examples = [
# #     ["The tower is 324 metres (1,063 ft) tall,"],
# #     ["The Moon's orbit around Earth has"],
# #     ["The smooth Borealis basin in the Northern Hemisphere covers 40%"],
# # ]
# gr.Interface.load(
#     "huggingface/chenluuli/test-text-vis",
#     inputs=gr.Textbox(lines=5, label="Input Text"),
#     outputs="text",
#     # title=title,
#     # description=description,
#     # article=article,
#     # examples=examples,
#     # enable_queue=True,
# ).launch()
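# Note: gr.Interface.load was deprecated in later Gradio releases; the current
# equivalent is gr.load (e.g. gr.load("models/chenluuli/test-text-vis")).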