from fastapi import APIRouter
from datetime import datetime
from datasets import load_dataset
from sklearn.metrics import accuracy_score
import random
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from .utils.evaluation import TextEvaluationRequest
from .utils.emissions import tracker, clean_emissions_data, get_space_info

router = APIRouter()

DESCRIPTIONS = {
    "baseline": "baseline most common class",
    "bert-base": "bert base finetuned",
    "bert-medium": "to be implemented",
    "bert-small": "to be implemented",
    "bert-mini": "to be implemented",
    "bert-tiny": "to be implemented",
}

ROUTE = "/text"


def baseline_model(dataset_length: int):
    # Random predictions are one possible placeholder:
    # predictions = [random.randint(0, 7) for _ in range(dataset_length)]
    # My favorite baseline is the most common class.
    predictions = [0] * dataset_length
    return predictions


def bert_model(test_dataset: dict, model_type: str):
    print("Starting my code block.")
    texts = test_dataset["quote"]
    model_repo = f"Nonnormalizable/frugal-ai-text-{model_type}"

    # Load the fine-tuned model and its tokenizer from the Hub.
    model = AutoModelForSequenceClassification.from_pretrained(model_repo)
    tokenizer = AutoTokenizer.from_pretrained(model_repo)

    # Run on GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device:", device)
    model = model.to(device)

    # Tokenize the full test set as a single padded batch.
    test_encoding = tokenizer(
        texts,
        truncation=True,
        padding=True,
        return_tensors="pt",
    )

    model.eval()
    with torch.no_grad():
        test_input_ids = test_encoding["input_ids"].to(device)
        test_attention_mask = test_encoding["attention_mask"].to(device)
        print("Starting model run.")
        outputs = model(input_ids=test_input_ids, attention_mask=test_attention_mask)
        print("End of model run.")
        # The predicted class is the argmax over the label logits.
        predictions = torch.argmax(outputs.logits, dim=1)
        predictions = predictions.cpu().numpy()
    print("End of my code block.")
    return predictions


@router.post(ROUTE, tags=["Text Task"])
async def evaluate_text(
    request: TextEvaluationRequest,
    model_type: str = "bert-base",
):
    """
    Evaluate text classification for climate disinformation detection.

    model_type selects the inference model:
    - "baseline": predicts the most common class for every example
    - "bert-*": a fine-tuned BERT variant loaded from the Hub (see DESCRIPTIONS)
    """
    # Get space info
    username, space_url = get_space_info()

    # Define the label mapping
    LABEL_MAPPING = {
        "0_not_relevant": 0,
        "1_not_happening": 1,
        "2_not_human": 2,
        "3_not_bad": 3,
        "4_solutions_harmful_unnecessary": 4,
        "5_science_unreliable": 5,
        "6_proponents_biased": 6,
        "7_fossil_fuels_needed": 7,
    }

    # Load and prepare the dataset
    dataset = load_dataset(request.dataset_name)

    # Convert string labels to integers
    dataset = dataset.map(lambda x: {"label": LABEL_MAPPING[x["label"]]})

    # Split dataset
    train_test = dataset["train"].train_test_split(
        test_size=request.test_size, seed=request.test_seed
    )
    test_dataset = train_test["test"]

    # Start tracking emissions
    tracker.start()
    tracker.start_task("inference")

    # --------------------------------------------------------------------------------------------
    # YOUR MODEL INFERENCE CODE HERE
    # Update the code below to replace the baseline with your model's inference, keeping it
    # inside this block so that energy consumption and emissions are tracked.
    # --------------------------------------------------------------------------------------------

    true_labels = test_dataset["label"]
    if model_type == "baseline":
        predictions = baseline_model(len(true_labels))
    elif model_type.startswith("bert-"):
        predictions = bert_model(test_dataset, model_type)
    else:
        raise ValueError(f"Unknown model_type: {model_type!r}")

    # --------------------------------------------------------------------------------------------
    # YOUR MODEL INFERENCE STOPS HERE
    # --------------------------------------------------------------------------------------------

    # Stop tracking emissions
    emissions_data = tracker.stop_task()

    # Calculate accuracy
    accuracy = accuracy_score(true_labels, predictions)

    # Prepare results dictionary
    results = {
        "username": username,
        "space_url": space_url,
        "submission_timestamp": datetime.now().isoformat(),
        "model_description": DESCRIPTIONS[model_type],
        "accuracy": float(accuracy),
        # Convert kWh -> Wh and kg -> g CO2eq, matching the field names below.
        "energy_consumed_wh": emissions_data.energy_consumed * 1000,
        "emissions_gco2eq": emissions_data.emissions * 1000,
        "emissions_data": clean_emissions_data(emissions_data),
        "api_route": ROUTE,
        "dataset_config": {
            "dataset_name": request.dataset_name,
            "test_size": request.test_size,
            "test_seed": request.test_seed,
        },
    }

    return results
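
# --------------------------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original route): one way this endpoint could be
# exercised locally with FastAPI's TestClient. The `app` object and the dataset name are
# assumptions; the payload fields mirror how TextEvaluationRequest is used above, and
# model_type arrives as a query parameter since it is declared outside the request body.
#
#   from fastapi import FastAPI
#   from fastapi.testclient import TestClient
#
#   app = FastAPI()
#   app.include_router(router)
#   client = TestClient(app)
#   response = client.post(
#       ROUTE,
#       params={"model_type": "baseline"},  # defaults to "bert-base" if omitted
#       json={
#           "dataset_name": "your-org/your-text-dataset",  # hypothetical dataset repo
#           "test_size": 0.2,
#           "test_seed": 42,
#       },
#   )
#   print(response.json()["accuracy"])
# --------------------------------------------------------------------------------------------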