import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_path = "ssocean/NAIP"  # replace with your own model path if needed
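# NOTE: load_in_8bit requires the bitsandbytes package and a CUDA GPU;
# the quantized weights are placed on the GPU as the model loads.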
model = AutoModelForSequenceClassification.from_pretrained(model_path, num_labels=1, load_in_8bit=True)
tokenizer = AutoTokenizer.from_pretrained(model_path)
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
model.eval()
def predict(title, abstract):
    # Combine the title and abstract into a single prompt string
    text = f"Given a certain paper, Title: {title}\nAbstract: {abstract}.\nPredict its normalized academic impact (between 0 and 1):"
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs.to(device))
    # Apply a sigmoid to the single regression logit to get a score in (0, 1)
    probability = torch.sigmoid(outputs.logits).item()
    return {"Impact Probability": probability}
# Build the Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter Paper Title Here..."),
        gr.Textbox(lines=5, placeholder="Enter Paper Abstract Here..."),
    ],
    outputs=[gr.Label(num_top_classes=1)],
    title="Newborn Article Impact Prediction based on LLM",
    description="Predict the normalized academic impact of a paper based on its title and abstract."
)
iface.launch()
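# Deployment note (assumption about the hosting setup): iface.launch(share=True)
# would additionally create a temporary public link when running outside a Space.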