"""Run a single-text prediction with a fine-tuned HF sequence-classification model.

Loads a model/tokenizer pair from a local directory (or hub id), runs one
example through it, and prints the predicted class index and class
probabilities.
"""

import torch
import torch.nn.functional as F
from transformers import AutoModelForSequenceClassification, AutoTokenizer


def load_model(model_directory):
    """Load a sequence-classification model and its tokenizer.

    Args:
        model_directory: Path (or hub id) containing 'config.json' and the
            model weights (e.g. 'pytorch_model.bin').

    Returns:
        Tuple of (model, tokenizer).
    """
    model = AutoModelForSequenceClassification.from_pretrained(model_directory)
    tokenizer = AutoTokenizer.from_pretrained(model_directory)
    return model, tokenizer


def predict(model, tokenizer, input_text):
    """Classify one input string.

    Args:
        model: A loaded `AutoModelForSequenceClassification`.
        tokenizer: The matching tokenizer.
        input_text: Raw text to classify.

    Returns:
        Tuple of (predicted_class, probabilities) as numpy arrays with a
        leading batch dimension of 1.
    """
    # Tokenize into a batch of one; truncation guards against over-long input.
    inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)

    # Move the model and all input tensors to the same device.
    # NOTE(review): re-selecting the device and calling model.to() on every
    # call is redundant after the first invocation, but it is idempotent and
    # kept so repeated calls behave identically.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    inputs = {k: v.to(device) for k, v in inputs.items()}

    # Evaluation mode disables dropout etc.; no_grad skips autograd tracking.
    model.eval()
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits

    # Softmax over the class dimension -> per-class probabilities.
    probabilities = F.softmax(logits, dim=1)

    # argmax gives the index of the most likely class for each batch row.
    predicted_class = torch.argmax(probabilities, dim=1).cpu().numpy()
    probabilities = probabilities.cpu().numpy()

    return predicted_class, probabilities


def main():
    # Replace 'your-model-directory' with the actual path to your model directory
    model_directory = "Kurkur99/modeling"  # e.g., "Kurkur99/Kurkur99/transactionmerchant/model_directory"
    model, tokenizer = load_model(model_directory)

    # Example input text
    input_text = "Example input text for prediction"

    # Get predictions
    predicted_class, probabilities = predict(model, tokenizer, input_text)

    # Output the results
    print(f"Predicted Class: {predicted_class[0]}")
    print(f"Probabilities: {probabilities[0]}")


if __name__ == "__main__":
    main()