import gradio as gr
from unsloth import FastLanguageModel

# Load the fine-tuned model and tokenizer from the Hugging Face Hub
def load_model(hf_token):
    try:
        # Load the classifier in 4-bit precision (requires a CUDA GPU at runtime)
        model_name = "shukdevdatta123/sql_injection_classifier_DeepSeek_R1_fine_tuned_model"
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=model_name,
            load_in_4bit=True,
            token=hf_token,
        )
        return model, tokenizer
    except Exception as e:
        # On failure, return the error message in place of the tokenizer
        return None, str(e)
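
# Optional helper (a minimal sketch, not used by the app below): memoize the
# loader so the multi-gigabyte checkpoint is downloaded and initialized only
# once per process. This assumes the same token string is passed on every
# request; the name load_model_cached is illustrative. predict_sql_injection
# could call it in place of load_model to avoid reloading on each prediction.
from functools import lru_cache

@lru_cache(maxsize=1)
def load_model_cached(hf_token):
    return load_model(hf_token)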

# Classify a SQL query as normal (0) or an injection attack (1)
def predict_sql_injection(query, hf_token):
    model, tokenizer = load_model(hf_token)

    # load_model returns (None, error_message) on failure
    if model is None:
        return f"Error loading model: {tokenizer}"

    # Switch the model into Unsloth's faster inference mode
    inference_model = FastLanguageModel.for_inference(model)

    prompt = f"### Instruction:\nClassify the following SQL query as normal (0) or an injection attack (1).\n\n### Query:\n{query}\n\n### Classification:\n"
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")

    # Generate the classification text that follows the prompt
    outputs = inference_model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=1000,
        use_cache=True,
    )
    prediction = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    # Keep only the text generated after the "### Classification:" marker
    return prediction.split("### Classification:\n")[-1].strip()

# Gradio callback: validate the inputs, then run the classifier
def classify_sql_injection(query, hf_token):
    if not hf_token:
        return "Please enter your Hugging Face token."
    
    if not query:
        return "Please enter a SQL query first."
    
    result = predict_sql_injection(query, hf_token)
    return f"Prediction: {result}"

# Gradio interface (no live=True: rerunning on every keystroke would reload the model each time)
iface = gr.Interface(
    fn=classify_sql_injection,
    inputs=[
        gr.Textbox(label="SQL Query", placeholder="Enter SQL query here..."),
        gr.Textbox(label="Hugging Face Token", type="password")
    ],
    outputs="text",
    title="SQL Injection Classifier",
    description="Enter a SQL query and your Hugging Face token to classify whether the query is a normal SQL query (0) or a SQL injection attack (1)."
)

# Launch the Gradio app
iface.launch()
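
# Example inputs to try in the UI (illustrative only):
#   SELECT * FROM users WHERE id = 5;               -- a typical, benign query
#   SELECT * FROM users WHERE name = '' OR '1'='1'; -- a classic injection pattern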