#!/usr/bin/env python3
"""
Validation script for the quantized ONNX LazarusNLP IndoBERT model.
Checks model integrity, performance, and accuracy.
"""
import onnxruntime as ort
from transformers import AutoTokenizer
import numpy as np
import json
import os
import time
import sys
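# Note: all paths below are relative, so run this script from the model directory
# (the folder containing model.onnx and the tokenizer files), e.g. `python validate_model.py`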
def check_files():
"""Check if all required files are present."""
print("πŸ” Checking required files...")
required_files = [
"model.onnx",
"tokenizer.json",
"tokenizer_config.json",
"special_tokens_map.json",
"vocab.txt",
"config.json",
"export_config.json",
"README.md"
]
missing_files = []
file_sizes = {}
for file in required_files:
if os.path.exists(file):
file_sizes[file] = os.path.getsize(file)
print(f"βœ… {file} ({file_sizes[file] / (1024*1024):.1f} MB)")
else:
missing_files.append(file)
print(f"❌ {file} - MISSING")
if missing_files:
print(f"\n❌ Missing files: {missing_files}")
return False, {}
print("βœ… All required files present")
return True, file_sizes
def check_model_loading():
"""Test model and tokenizer loading."""
print("\nπŸ”„ Testing model loading...")
try:
# Load tokenizer
start_time = time.time()
tokenizer = AutoTokenizer.from_pretrained("./")
tokenizer_time = time.time() - start_time
print(f"βœ… Tokenizer loaded ({tokenizer_time:.3f}s)")
# Load ONNX model
start_time = time.time()
session = ort.InferenceSession("model.onnx")
model_time = time.time() - start_time
print(f"βœ… ONNX model loaded ({model_time:.3f}s)")
# Check model inputs/outputs
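# This export is fed with input_ids and attention_mask only (see the run() calls below);
# some BERT ONNX exports also expect token_type_ids, which this script assumes is not required here.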
inputs = session.get_inputs()
outputs = session.get_outputs()
print(f"βœ… Model inputs: {[inp.name for inp in inputs]}")
print(f"βœ… Model outputs: {[out.name for out in outputs]}")
return True, session, tokenizer
except Exception as e:
print(f"❌ Model loading failed: {e}")
return False, None, None
def test_basic_inference(session, tokenizer):
"""Test basic model inference."""
print("\nπŸ§ͺ Testing basic inference...")
test_texts = [
"Halo",
"Ini adalah tes sederhana.",
"Teknologi AI berkembang pesat di Indonesia.",
"Model machine learning membantu analisis data besar untuk memberikan insight yang berharga."
]
results = []
for i, text in enumerate(test_texts):
try:
# Tokenize
inputs = tokenizer(text, return_tensors="np", padding=True, truncation=True)
# Inference
start_time = time.time()
outputs = session.run(None, {
'input_ids': inputs['input_ids'],
'attention_mask': inputs['attention_mask']
})
inference_time = time.time() - start_time
# Check output
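# outputs[0] is treated as the last_hidden_state, shaped (batch_size, sequence_length, hidden_size)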
embeddings = outputs[0]
token_count = inputs['input_ids'].shape[1]
results.append({
'text': text,
'tokens': token_count,
'output_shape': embeddings.shape,
'inference_time': inference_time,
'has_nan': np.isnan(embeddings).any(),
'has_inf': np.isinf(embeddings).any(),
'output_range': [float(embeddings.min()), float(embeddings.max())]
})
print(f"βœ… Test {i+1}: {token_count} tokens β†’ {embeddings.shape} ({inference_time:.4f}s)")
except Exception as e:
print(f"❌ Test {i+1} failed: {e}")
return False, []
return True, results
def test_batch_processing(session, tokenizer):
"""Test batch processing capability."""
print("\nπŸ“¦ Testing batch processing...")
batch_texts = [
"Kalimat pertama untuk tes batch.",
"Ini adalah kalimat kedua yang sedikit lebih panjang.",
"Kalimat ketiga dengan panjang yang berbeda lagi untuk menguji padding.",
"Terakhir, kalimat keempat."
]
try:
# Batch processing
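# padding=True pads every sequence to the longest item in the batch; padded positions are marked with 0 in attention_mask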
inputs = tokenizer(batch_texts, return_tensors="np", padding=True, truncation=True)
start_time = time.time()
outputs = session.run(None, {
'input_ids': inputs['input_ids'],
'attention_mask': inputs['attention_mask']
})
batch_time = time.time() - start_time
embeddings = outputs[0]
print(f"βœ… Batch shape: {embeddings.shape}")
print(f"βœ… Batch time: {batch_time:.4f}s")
print(f"βœ… Avg per item: {batch_time/len(batch_texts):.4f}s")
# Verify each item in batch
for i in range(len(batch_texts)):
item_embedding = embeddings[i]
if np.isnan(item_embedding).any() or np.isinf(item_embedding).any():
print(f"❌ Batch item {i} has invalid values")
return False
print("βœ… All batch items valid")
return True
except Exception as e:
print(f"❌ Batch processing failed: {e}")
return False
def test_edge_cases(session, tokenizer):
"""Test edge cases and error handling."""
print("\n🚧 Testing edge cases...")
edge_cases = [
("Empty string", ""),
("Single character", "a"),
("Numbers only", "123456789"),
("Punctuation", "!!!???..."),
("Mixed script", "Hello dunia 123 !@#"),
("Very long", "Kata " * 100), # ~400 characters
("Special tokens", "[CLS] [SEP] [MASK] [PAD] [UNK]")
]
passed = 0
total = len(edge_cases)
for name, text in edge_cases:
try:
inputs = tokenizer(text, return_tensors="np", padding=True, truncation=True)
outputs = session.run(None, {
'input_ids': inputs['input_ids'],
'attention_mask': inputs['attention_mask']
})
embeddings = outputs[0]
# Check for valid output
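# A hidden size of 768 matches the BERT-base encoder this model is assumed to be exported from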
if embeddings.shape[0] == 1 and embeddings.shape[2] == 768:
if not (np.isnan(embeddings).any() or np.isinf(embeddings).any()):
print(f"βœ… {name}: {embeddings.shape}")
passed += 1
else:
print(f"❌ {name}: Invalid values (NaN/Inf)")
else:
print(f"❌ {name}: Wrong shape {embeddings.shape}")
except Exception as e:
print(f"❌ {name}: {e}")
print(f"\nβœ… Edge cases passed: {passed}/{total}")
return passed == total
def performance_benchmark(session, tokenizer):
"""Run performance benchmark."""
print("\n⚑ Performance benchmark...")
# Test different text lengths
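# Token counts in the labels are approximate; the actual count is measured below with tokenizer.encode()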
test_cases = [
("Short (5 tokens)", "Halo dunia!"),
("Medium (15 tokens)", "Teknologi AI berkembang sangat pesat di era digital modern."),
("Long (50+ tokens)", " ".join(["Kalimat panjang dengan banyak kata untuk menguji performa model dalam memproses teks yang lebih kompleks dan detail."] * 2))
]
benchmark_results = {}
for name, text in test_cases:
times = []
token_count = len(tokenizer.encode(text))
# Warm up
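# The first run pays one-time initialization costs, so it is excluded from the timed runs below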
inputs = tokenizer(text, return_tensors="np", padding=True, truncation=True)
session.run(None, {
'input_ids': inputs['input_ids'],
'attention_mask': inputs['attention_mask']
})
# Benchmark runs
for _ in range(20):
inputs = tokenizer(text, return_tensors="np", padding=True, truncation=True)
start_time = time.time()
outputs = session.run(None, {
'input_ids': inputs['input_ids'],
'attention_mask': inputs['attention_mask']
})
times.append(time.time() - start_time)
avg_time = np.mean(times)
std_time = np.std(times)
tokens_per_sec = token_count / avg_time
benchmark_results[name] = {
'avg_time': avg_time,
'std_time': std_time,
'token_count': token_count,
'tokens_per_sec': tokens_per_sec
}
print(f"βœ… {name}: {avg_time:.4f}s Β± {std_time:.4f}s ({tokens_per_sec:.1f} tokens/s)")
return benchmark_results
def check_config_consistency():
"""Check configuration file consistency."""
print("\nπŸ”§ Checking configuration consistency...")
try:
# Load configurations
with open("config.json", "r") as f:
config = json.load(f)
with open("tokenizer_config.json", "r") as f:
tokenizer_config = json.load(f)
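# export_config.json is assumed to hold the ONNX export settings written at conversion time
# (e.g. unlimited_length and dynamic_axes, which are checked below)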
with open("export_config.json", "r") as f:
export_config = json.load(f)
# Check consistency
issues = []
# Max length consistency
model_max_pos = config.get("max_position_embeddings", 512)
tokenizer_max = tokenizer_config.get("model_max_length", 512)
if model_max_pos != tokenizer_max:
issues.append(f"Max length mismatch: model={model_max_pos}, tokenizer={tokenizer_max}")
# Check unlimited length setting
unlimited = export_config.get("unlimited_length", False)
dynamic_axes = export_config.get("dynamic_axes", False)
if unlimited and not dynamic_axes:
issues.append("Unlimited length enabled but dynamic_axes is False")
# Check quantization info
if "quantization" not in config:
issues.append("Missing quantization information in config")
if issues:
for issue in issues:
print(f"⚠️ {issue}")
else:
print("βœ… All configurations consistent")
return len(issues) == 0
except Exception as e:
print(f"❌ Config check failed: {e}")
return False
def generate_validation_report(results):
"""Generate validation report."""
print("\nπŸ“Š VALIDATION REPORT")
print("=" * 60)
# Summary
all_passed = all([
results.get('files_ok', False),
results.get('loading_ok', False),
results.get('inference_ok', False),
results.get('batch_ok', False),
results.get('edge_cases_ok', False),
results.get('config_ok', False)
])
status = "βœ… PASSED" if all_passed else "❌ FAILED"
print(f"Overall Status: {status}")
print(f"\nFile Check: {'βœ… PASSED' if results.get('files_ok') else '❌ FAILED'}")
print(f"Model Loading: {'βœ… PASSED' if results.get('loading_ok') else '❌ FAILED'}")
print(f"Basic Inference: {'βœ… PASSED' if results.get('inference_ok') else '❌ FAILED'}")
print(f"Batch Processing: {'βœ… PASSED' if results.get('batch_ok') else '❌ FAILED'}")
print(f"Edge Cases: {'βœ… PASSED' if results.get('edge_cases_ok') else '❌ FAILED'}")
print(f"Config Consistency: {'βœ… PASSED' if results.get('config_ok') else '❌ FAILED'}")
# Performance summary
if 'benchmark' in results:
print(f"\n⚑ PERFORMANCE SUMMARY")
for name, data in results['benchmark'].items():
print(f"{name}: {data['avg_time']:.4f}s ({data['tokens_per_sec']:.1f} tokens/s)")
# File sizes
if 'file_sizes' in results:
total_size = sum(results['file_sizes'].values()) / (1024*1024)
print(f"\nπŸ“ Total model size: {total_size:.1f} MB")
print("=" * 60)
return all_passed
def main():
"""Run complete model validation."""
print("πŸ” LazarusNLP IndoBERT ONNX - Model Validation")
print("=" * 60)
results = {}
# Check files
files_ok, file_sizes = check_files()
results['files_ok'] = files_ok
results['file_sizes'] = file_sizes
if not files_ok:
print("\n❌ Validation failed: Missing required files")
return False
# Check model loading
loading_ok, session, tokenizer = check_model_loading()
results['loading_ok'] = loading_ok
if not loading_ok:
print("\n❌ Validation failed: Model loading error")
return False
# Test inference
inference_ok, inference_results = test_basic_inference(session, tokenizer)
results['inference_ok'] = inference_ok
results['inference_results'] = inference_results
# Test batch processing
batch_ok = test_batch_processing(session, tokenizer)
results['batch_ok'] = batch_ok
# Test edge cases
edge_cases_ok = test_edge_cases(session, tokenizer)
results['edge_cases_ok'] = edge_cases_ok
# Performance benchmark
benchmark = performance_benchmark(session, tokenizer)
results['benchmark'] = benchmark
# Check config consistency
config_ok = check_config_consistency()
results['config_ok'] = config_ok
# Generate report
validation_passed = generate_validation_report(results)
# Save results
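# default=str stringifies values the json module cannot encode natively (e.g. numpy floats/bools in the results)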
with open("validation_results.json", "w") as f:
json.dump(results, f, indent=2, default=str)
print(f"\nπŸ’Ύ Validation results saved to validation_results.json")
if validation_passed:
print("πŸŽ‰ Model validation completed successfully!")
return True
else:
print("❌ Model validation failed!")
return False
if __name__ == "__main__":
success = main()
sys.exit(0 if success else 1)