import pandas as pd
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the Hugging Face forecasting model
def load_model():
    model_name = "Ankur87/Llama2_Time_series_forecasting_7.0"  # Using the specified model
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Llama 2 tokenizers ship without a padding token; fall back to EOS
    # so that tokenization with padding=True does not fail below.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    return model, tokenizer

model, tokenizer = load_model()
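# Note: a 7B-parameter checkpoint is heavy for a CPU-only Space. A minimal
# sketch of a lighter-weight load, assuming a CUDA device and the
# `accelerate` package are available (not part of the original app):
#
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.float16, device_map="auto"
#   )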
def forecast(csv_file):
    # gr.File may pass a filepath string (Gradio 4.x) or a tempfile-like
    # object with a .name attribute (Gradio 3.x); support both.
    path = csv_file if isinstance(csv_file, str) else csv_file.name

    # Read the semicolon-delimited CSV and parse timestamps in one pass
    # (the original parsed the column twice, once via parse_dates and
    # again via to_datetime; a single explicit parse is sufficient)
    data = pd.read_csv(path, sep=";")
    data['timestamp_column'] = pd.to_datetime(data['timestamp_column'], format="%Y%m%d %H:%M")

    # Serialize the series as "timestamp: value" lines for the model
    input_text = "\n".join(f"{row['timestamp_column']}: {row['Inbound']}" for _, row in data.iterrows())
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)

    # Generate the forecast continuation
    with torch.no_grad():
        predictions = model.generate(**inputs, max_length=2500, num_return_sequences=1)

    # Decode the generated forecast
    forecast_text = tokenizer.decode(predictions[0], skip_special_tokens=True)

    # Save the forecast result to a CSV for download
    forecasts = pd.DataFrame({'forecast': [forecast_text]})
    output_file = "forecasts.csv"
    forecasts.to_csv(output_file, index=False)
    return output_file
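# For reference, the expected input format implied by the parsing above — a
# minimal sketch assuming the column names 'timestamp_column' and 'Inbound',
# a semicolon delimiter, and "%Y%m%d %H:%M" timestamps:
#
#   timestamp_column;Inbound
#   20240101 00:00;120
#   20240101 01:00;98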
# Gradio Interface
iface = gr.Interface(
    fn=forecast,
    inputs=gr.File(label="Upload CSV File"),
    outputs=gr.File(label="Download Forecasts"),
    title="Time Series Forecasting with Llama2",
    description="Upload a semicolon-separated CSV with 'timestamp_column' (YYYYMMDD HH:MM) and 'Inbound' columns to generate forecasts using Llama2."
)

iface.launch()
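# Local smoke test — kept commented out because iface.launch() blocks, and
# "sample.csv" is a hypothetical path in the format sketched above:
#
#   output_path = forecast("sample.csv")
#   print(pd.read_csv(output_path))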