from fastapi import FastAPI
from transformers import BertForSequenceClassification, AutoTokenizer
import torch
import os

MODEL_PATH = "hariharan220/finbert-stock-sentiment"
# Load the Hugging Face API key from environment variables
HF_API_KEY = os.getenv("HF_API_KEY")
if not HF_API_KEY:
    raise ValueError("ERROR: Hugging Face API Key (HF_API_KEY) is missing. Set it in your environment variables.")

# Configure authentication for Hugging Face
os.environ["HF_HOME"] = "/root/.cache/huggingface"
os.environ["HF_API_TOKEN"] = HF_API_KEY  # Explicitly set API key
os.environ["HUGGINGFACE_HUB_TOKEN"] = HF_API_KEY

print("Authenticating with Hugging Face...")
# Load the model and tokenizer with authentication (the token is picked up
# from the environment variables set above)
model = BertForSequenceClassification.from_pretrained(MODEL_PATH)
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)

print("Model Loaded Successfully!")
# Define sentiment labels
labels = ["Negative", "Neutral", "Positive"]

# Create FastAPI app
app = FastAPI()


@app.get("/")  # decorator added so the health-check route is actually registered
async def home():
    return {"message": "Stock Sentiment Analysis API is running!"}
@app.post("/predict")  # decorator added so the endpoint is registered; the path name is an assumption
async def predict_sentiment(text: str):
    """Predicts the sentiment of stock-related text using FinBERT."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    prediction = torch.argmax(logits, dim=1).item()
    sentiment = labels[prediction]
    return {"text": text, "sentiment": sentiment}