Inference
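The snippet below loads the AquilaX-AI/Review classifier, builds a prompt from a single security finding, and prints the predicted label. It runs on GPU when one is available and falls back to CPU otherwise.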

import time

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Use the first GPU when available, otherwise fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = AutoModelForSequenceClassification.from_pretrained("AquilaX-AI/Review").to(device)
model.eval()  # inference mode: disables dropout
tokenizer = AutoTokenizer.from_pretrained("AquilaX-AI/Review")

partial_code = "if (userInput.length > 255) { return; }"  # Example snippet of insecure code
cwe_id = "CWE-22"  # Example CWE ID for Path Traversal
cwe_name = "Improper Limitation of a Pathname to a Restricted Directory"  # Example CWE Name
affected_line = "42"  # Example line number in the code file
file_name = "utils/inputValidator.js"  # Example file name
org_id = "12345"  # Example organization ID

start = time.time()

# Build the comma-separated "key: value" prompt the model is queried with
prompt = f"partial_code: {partial_code}, cwe_id: {cwe_id}, cwe_name: {cwe_name}, affected_line: {affected_line}, file_name: {file_name}, org_id: {org_id}"
inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(device)
with torch.no_grad():
    logits = model(**inputs).logits
predicted_class_id = logits.argmax(dim=-1).item()
predicted_class = model.config.id2label[predicted_class_id]

print(predicted_class)      # predicted label for this finding
print(time.time() - start)  # wall-clock seconds for tokenization + forward pass
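
To review several findings at once, the same model can score a padded batch in a single forward pass. The following is a minimal sketch, assuming the tokenizer defines a pad token and that each prompt follows the single-finding format above; the finding values are illustrative placeholders, not real scan output.

findings = [
    {"partial_code": "open(user_path)", "cwe_id": "CWE-22",
     "cwe_name": "Improper Limitation of a Pathname to a Restricted Directory",
     "affected_line": "10", "file_name": "app/files.py", "org_id": "12345"},
    # ... more findings ...
]

prompts = [
    f"partial_code: {f['partial_code']}, cwe_id: {f['cwe_id']}, cwe_name: {f['cwe_name']}, "
    f"affected_line: {f['affected_line']}, file_name: {f['file_name']}, org_id: {f['org_id']}"
    for f in findings
]

# Pad to the longest prompt so the batch forms one rectangular tensor
inputs = tokenizer(prompts, return_tensors="pt", padding=True, truncation=True).to(device)
with torch.no_grad():
    logits = model(**inputs).logits

# One predicted label per finding
for finding, class_id in zip(findings, logits.argmax(dim=-1).tolist()):
    print(finding["file_name"], model.config.id2label[class_id])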