|
import gradio as gr |
|
from unsloth import FastLanguageModel |
|
from transformers import AutoTokenizer |
|
|
|
|
|
# Cache successfully loaded models keyed by token so repeated predictions do
# not re-download and re-initialize the model on every call.
_MODEL_CACHE = {}


def load_model(hf_token):
    """Load the fine-tuned SQL-injection classifier in 4-bit precision.

    Args:
        hf_token: Hugging Face access token used to authenticate the download.

    Returns:
        ``(model, tokenizer)`` on success, or ``(None, error_message)`` on
        failure — callers must check whether the first element is ``None``.
    """
    # Reuse a previously loaded model for this token instead of reloading.
    if hf_token in _MODEL_CACHE:
        return _MODEL_CACHE[hf_token]

    try:
        model_name = "shukdevdatta123/sql_injection_classifier_DeepSeek_R1_fine_tuned_model"
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=model_name,
            load_in_4bit=True,
            token=hf_token,
        )
        # Only successful loads are cached, so a bad token can be retried.
        _MODEL_CACHE[hf_token] = (model, tokenizer)
        return model, tokenizer
    except Exception as e:
        # Keep the (None, str) error convention that callers rely on.
        return None, str(e)
|
|
|
|
|
def predict_sql_injection(query, hf_token):
    """Classify a SQL query as normal (0) or an injection attack (1).

    Args:
        query: The SQL query text to classify.
        hf_token: Hugging Face access token, forwarded to ``load_model``.

    Returns:
        The model's classification text (whatever follows the final
        "### Classification:" marker), or an error message if the model
        failed to load.
    """
    model, tokenizer = load_model(hf_token)

    # load_model returns (None, error_message) on failure.
    if model is None:
        return f"Error loading model: {tokenizer}"

    inference_model = FastLanguageModel.for_inference(model)

    prompt = f"### Instruction:\nClassify the following SQL query as normal (0) or an injection attack (1).\n\n### Query:\n{query}\n\n### Classification:\n"

    # Send inputs to whatever device the model actually lives on instead of
    # hard-coding "cuda" — the original crashed on CPU-only hosts.
    device = next(inference_model.parameters()).device
    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    outputs = inference_model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=1000,  # generous budget; generation stops early at EOS
        use_cache=True,
    )
    prediction = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    # The label follows the last "### Classification:" marker in the decoded text.
    return prediction.split("### Classification:\n")[-1].strip()
|
|
|
|
|
def classify_sql_injection(query, hf_token):
    """Validate the UI inputs, then run the SQL-injection classifier.

    Args:
        query: SQL query text from the textbox (may be empty).
        hf_token: Hugging Face token from the password textbox (may be empty).

    Returns:
        A user-facing string: a validation hint when an input is missing,
        otherwise the model's verdict prefixed with "Prediction: ".
    """
    # Checked in order — the token hint takes priority over the query hint.
    validations = (
        (hf_token, "Please enter your Hugging Face token."),
        (query, "Please enter a SQL query first."),
    )
    for value, hint in validations:
        if not value:
            return hint

    return "Prediction: " + predict_sql_injection(query, hf_token)
|
|
|
|
|
# Build the Gradio UI. `live=True` was removed deliberately: it re-ran the
# classifier on every keystroke, which triggered a full model load plus a
# GPU generation pass per character typed. The classifier now runs only on
# explicit submit.
iface = gr.Interface(
    fn=classify_sql_injection,
    inputs=[
        gr.Textbox(label="SQL Query", placeholder="Enter SQL query here..."),
        gr.Textbox(label="Hugging Face Token", type="password"),
    ],
    outputs="text",
    title="SQL Injection Classifier",
    description=(
        "Enter an SQL query and your Hugging Face token to classify whether "
        "the query is a normal SQL query (0) or a SQL injection attack (1)."
    ),
)


# Launch only when executed as a script, so importing this module for reuse
# or testing does not start a web server as a side effect.
if __name__ == "__main__":
    iface.launch()
|
|