import gradio as gr
from transformers import pipeline
from huggingface_hub import InferenceClient  # needed by initClient() below
# from huggingface_hub import InferenceClient
# from transformers import pipeline
# modelName = "chenluuli/test-text-vis"
# pipeline = pipeline(task="image-classification", model="chenluuli/test-text-vis")
# def predict(input_img):
# predictions = pipeline(input_img)
# return input_img, {p["label"]: p["score"] for p in predictions}
# gradio_app = gr.Interface(
# predict,
# inputs="text",
# outputs="text",
# title="demo",
# )
# if __name__ == "__main__":
# gradio_app.launch()
token = ""  # TODO: support passing the token in externally
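# One way to satisfy the TODO above (a sketch, not wired in): read the token from
# an environment variable instead of hard-coding it, e.g. the HF_TOKEN secret
# commonly configured on Spaces (name assumed here):
#   import os
#   token = os.environ.get("HF_TOKEN", "")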
def initClient():
    # Initialize client for a specific model
    client = InferenceClient(
        model="prompthero/openjourney-v4",
        # base_url=...,
        # api_key=...,
    )
    return client
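# Illustrative only (the original app never calls initClient): the client could
# generate an image from a text prompt via the Inference API, assuming a valid
# api_key/token is configured above.
# client = initClient()
# image = client.text_to_image("a simple bar chart illustration")  # returns a PIL.Image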
def greet(input):
    modelName = "chenluuli/test-text-vis"  # unused here, kept from the earlier demo
    # Note: the pipeline is rebuilt on every request; slow, but kept as in the original
    text2text_generator = pipeline("text-generation", model="Qwen/Qwen2.5-0.5B-Instruct", torch_dtype="auto", device_map="auto")
    # Prompt (translated): "## You are a visualization expert; based on the information
    # I provide, recommend a reasonable chart configuration ## Based on this information,
    # return a reasonable chart type >> My input data is:"
    prompt = "##你是一个可视化专家,通过我提供的信息,推荐合理的图表配置##请根据这些信息,返回合理的图表类型 >>我输入的数据如下:"
    messages = [{
        "role": "user",
        "content": prompt + input,
    }]
    response = text2text_generator(
        messages,
        max_length=512
    )
    # With chat-style input the pipeline returns the full conversation in
    # "generated_text"; the assistant's reply is the last message.
    print(response, response[0]['generated_text'])
    return response[0]['generated_text'][-1]['content']
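# Quick local check (illustrative; assumes the Qwen weights can be downloaded):
# print(greet("month,sales\nJan,10\nFeb,15\nMar,9"))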
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()
# title = "demo"
# description = "Gradio Demo for custom demo"
# # examples = [
# # ["The tower is 324 metres (1,063 ft) tall,"],
# # ["The Moon's orbit around Earth has"],
# # ["The smooth Borealis basin in the Northern Hemisphere covers 40%"],
# # ]
# gr.Interface.load(
# "huggingface/chenluuli/test-text-vis",
# inputs=gr.Textbox(lines=5, label="Input Text"),
# outputs="text",
# #title=title,
# #description=description,
# # article=article,
# # examples=examples,
# #enable_queue=True,
# ).launch()