import os
import subprocess
import sys
import importlib
def check_and_install_package(package_name):
    """Install a package with pip if it cannot be imported."""
    try:
        importlib.import_module(package_name)
        print(f"{package_name} is already installed")
    except ImportError:
        print(f"Installing {package_name}...")
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package_name])
def install_requirements():
    """Upgrade pip, install requirements.txt, then verify critical packages."""
    try:
        # Upgrade pip first
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--upgrade', 'pip'])
        # Install requirements from file
        requirements_path = os.path.join(os.path.dirname(__file__), 'requirements.txt')
        if os.path.exists(requirements_path):
            print("Installing requirements from requirements.txt...")
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', requirements_path])
            print("Successfully installed requirements")
        else:
            print(f"requirements.txt not found at {requirements_path}")
        # Explicitly check and install critical packages
        critical_packages = ['sentencepiece', 'transformers', 'torch', 'gradio']
        for package in critical_packages:
            check_and_install_package(package)
    except subprocess.CalledProcessError as e:
        print(f"Error installing requirements: {e}")
        raise
    except Exception as e:
        print(f"Unexpected error during installation: {e}")
        raise
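# For reference, a minimal requirements.txt covering this app's imports might
# look like the following (package set taken from critical_packages above;
# unpinned versions are illustrative, not from the original repo):
#
#   torch
#   transformers
#   sentencepiece
#   gradio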
# Install requirements first
print("Starting installation of requirements...")
install_requirements()
print("Finished installing requirements")
# Import the heavy dependencies only after installation has run
# (os is already imported above)
import gradio as gr
import torch
from transformers import BertTokenizer, BertForSequenceClassification, XLNetTokenizer, XLNetForSequenceClassification
# Load models
logbert_model = BertForSequenceClassification.from_pretrained(
    "Sirapatsorn/Spark_Log_Analysis-logbert",  # change to the correct path if needed
    token=os.getenv("HUGGINGFACE_TOKEN")
)
xlnet_model = XLNetForSequenceClassification.from_pretrained(
    "Sirapatsorn/Spark_Log_Analysis-xlnet",  # change to the correct path if needed
    token=os.getenv("HUGGINGFACE_TOKEN")
)
# Load tokenizers
logbert_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
xlnet_tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
# Check for GPU and put both models in inference mode
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logbert_model.to(device)
xlnet_model.to(device)
logbert_model.eval()
xlnet_model.eval()
def predict_log(text):
    # LogBERT prediction: 3-way log-level classification
    logbert_inputs = logbert_tokenizer(text, return_tensors="pt", truncation=True, max_length=512).to(device)
    with torch.no_grad():
        logbert_outputs = logbert_model(**logbert_inputs)
        log_level = torch.argmax(logbert_outputs.logits, dim=1).item()
        log_level_confidence = torch.softmax(logbert_outputs.logits, dim=1)[0][log_level].item()
    log_levels = ["INFO", "WARN", "ERROR"]
    log_level_result = log_levels[log_level]

    # XLNet prediction: single-logit regression used as a performance score
    xlnet_inputs = xlnet_tokenizer(text, return_tensors="pt", truncation=True, max_length=512).to(device)
    with torch.no_grad():
        xlnet_outputs = xlnet_model(**xlnet_inputs)
        performance_value = xlnet_outputs.logits.item()

    # Map the raw score onto coarse performance buckets
    if performance_value < 0:
        performance_status = "Good Performance"
    elif performance_value < 3.0:
        performance_status = "Normal Performance"
    else:
        performance_status = "Poor Performance"

    return {
        "Log Level": log_level_result,
        "Confidence": f"{log_level_confidence:.2f}",
        "Performance Value": f"{performance_value:.2f}",
        "Performance Status": performance_status
    }
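# Illustrative call (the output values here are hypothetical; real numbers
# depend on the fine-tuned checkpoints):
#
#   predict_log("ERROR Executor: Exception in task 0.0 in stage 1.0 (TID 7)")
#   # -> {"Log Level": "ERROR", "Confidence": "0.97",
#   #     "Performance Value": "3.41", "Performance Status": "Poor Performance"}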
def predict_from_text(text):
    prediction = predict_log(text)
    return (f"Log Level: {prediction['Log Level']} (Confidence: {prediction['Confidence']})\n"
            f"Performance Value: {prediction['Performance Value']}\n"
            f"Performance Status: {prediction['Performance Status']}")
def predict_from_file(file):
    results = []
    with open(file.name, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:  # skip blank lines instead of predicting on them
                continue
            prediction = predict_log(line)
            result_text = (f"Log: {line}\n"
                           f"Log Level: {prediction['Log Level']} (Confidence: {prediction['Confidence']})\n"
                           f"Performance Value: {prediction['Performance Value']}\n"
                           f"Performance Status: {prediction['Performance Status']}")
            results.append(result_text)
    return "\n\n".join(results)
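# For large files, per-line inference is slow; a batched variant amortizes the
# tokenizer and forward-pass overhead. This is a sketch that assumes the
# models/tokenizers defined above are in scope (the function name and batch
# size are illustrative, not part of the original app):
def predict_log_levels_batch(lines, batch_size=32):
    """Classify log levels for a list of lines in batches (LogBERT only)."""
    level_names = ["INFO", "WARN", "ERROR"]
    levels = []
    for start in range(0, len(lines), batch_size):
        batch = lines[start:start + batch_size]
        inputs = logbert_tokenizer(batch, return_tensors="pt", truncation=True,
                                   max_length=512, padding=True).to(device)
        with torch.no_grad():
            logits = logbert_model(**inputs).logits
        levels.extend(level_names[i] for i in torch.argmax(logits, dim=1).tolist())
    return levels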
# Create Gradio interface
custom_css = """
.gr-button {
    background-color: #FFA500 !important;
    color: #FFFFFF !important;
    border: none !important;
}
/* hide secondary buttons */
.gr-button.gr-button-lg.gr-button-secondary {
    display: none !important;
}
"""
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("# Spark Log Analysis")
    gr.Markdown("Analyze your log messages for log level and performance prediction")
    with gr.Tabs():
        with gr.TabItem("Upload File"):
            file_upload = gr.File(label="Upload Log File")
            file_btn = gr.Button("Predict")
            file_output = gr.Textbox(label="Output", lines=10)
            file_btn.click(predict_from_file, inputs=file_upload, outputs=file_output)
        with gr.TabItem("Text Input"):
            text_input = gr.Textbox(label="Enter Log Message", lines=3)
            text_btn = gr.Button("Predict")
            text_output = gr.Textbox(label="Output", lines=5)
            text_btn.click(predict_from_text, inputs=text_input, outputs=text_output)
# Launch the app
demo.launch()
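# On Hugging Face Spaces the default launch() is sufficient; when testing
# locally, a temporary public URL can be requested with the standard Gradio
# option demo.launch(share=True).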