import gradio as gr
from transformers import pipeline

# Load the model; trust_remote_code is required because this checkpoint ships a custom architecture
pipe = pipeline("text-classification", model="Qwen/Qwen2.5-Math-PRM-72B", trust_remote_code=True)

# Define a function to generate responses
def generate_response(prompt):
    output = pipe(prompt)
    return output[0]['label']

# Create a Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Qwen2.5 Math Model",
    description="Ask a math question and get an answer!"
)

# Launch the interface
iface.launch()