from fastapi import FastAPI
from transformers import BertForSequenceClassification, AutoTokenizer
import torch
import os
MODEL_PATH = "hariharan220/finbert-stock-sentiment"
# Load the Hugging Face API token from an environment variable
HF_API_TOKEN = os.getenv("HF_API_KEY")  # Read the token from the HF_API_KEY environment variable
if not HF_API_TOKEN:
    raise ValueError("ERROR: Hugging Face API key (HF_API_KEY) is missing. Set it in your environment variables.")
# Authenticate when loading the model
model = BertForSequenceClassification.from_pretrained(
    MODEL_PATH,
    use_auth_token=HF_API_TOKEN
)
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_PATH,
    use_auth_token=HF_API_TOKEN
)
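# NOTE: on recent transformers releases, `use_auth_token=` is deprecated in favor of
# `token=`; keep whichever keyword argument your installed version supports.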
# Define sentiment labels
labels = ["Negative", "Neutral", "Positive"]
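# NOTE: this label ordering is an assumption; it should match the fine-tuned model's
# id2label mapping (check model.config.id2label if predictions look inverted).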
# Create the FastAPI app
app = FastAPI()
@app.get("/")
async def home():
return {"message": "Stock Sentiment Analysis API is running!"}
@app.post("/predict")
async def predict_sentiment(text: str):
    """Predicts the sentiment of stock-related text using FinBERT."""
    # Tokenize the input and run a single forward pass without tracking gradients
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    # Pick the highest-scoring class and map it to its sentiment label
    logits = outputs.logits
    prediction = torch.argmax(logits, dim=1).item()
    sentiment = labels[prediction]
    return {"text": text, "sentiment": sentiment}