#!/usr/bin/env python3
"""
Client for testing the ChatGPT Oasis Model Inference API deployed on Hugging Face Spaces
"""
import requests
import base64
import json
import mimetypes
from PIL import Image
import io
import os
import time
class HuggingFaceSpacesClient:
    def __init__(self, space_url):
        """
        Initialize the client with your Hugging Face Space URL

        Args:
            space_url (str): Your Space URL (e.g., "https://your-username-chatgpt-oasis.hf.space")
        """
        self.base_url = space_url.rstrip('/')
    def health_check(self):
        """Check if the API is healthy and models are loaded"""
        try:
            response = requests.get(f"{self.base_url}/health", timeout=30)
            print(f"Health Check Status: {response.status_code}")
            print(f"Response: {json.dumps(response.json(), indent=2)}")
            return response.status_code == 200
        except Exception as e:
            print(f"Health check error: {e}")
            return False
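
    # --- Sketch (an addition, not in the original script): free Spaces are put
    # to sleep when idle, so the first request can hit a slow cold start. This
    # helper polls /health until the Space answers or the deadline passes. ---
    def wait_until_ready(self, max_wait=300, poll_interval=10):
        """Poll /health until the Space is up or max_wait seconds elapse"""
        deadline = time.time() + max_wait
        while time.time() < deadline:
            try:
                if requests.get(f"{self.base_url}/health", timeout=30).status_code == 200:
                    return True
            except requests.RequestException:
                pass  # Space may still be booting; keep polling
            time.sleep(poll_interval)
        return False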
    def list_models(self):
        """Get information about available models"""
        try:
            response = requests.get(f"{self.base_url}/models", timeout=30)
            print(f"Models Status: {response.status_code}")
            models = response.json()
            print(f"Available Models: {json.dumps(models, indent=2)}")
            return models
        except Exception as e:
            print(f"Models list error: {e}")
            return None
    def predict_file_upload(self, image_path, model_name="oasis500m"):
        """
        Predict using file upload

        Args:
            image_path (str): Path to the image file
            model_name (str): Model to use ("oasis500m" or "vit-l-20")
        """
        if not os.path.exists(image_path):
            print(f"Image file not found: {image_path}")
            return None
        try:
            # Guess the MIME type from the extension instead of hardcoding
            # image/jpeg, so PNG and other formats are labeled correctly
            content_type = mimetypes.guess_type(image_path)[0] or 'image/jpeg'
            with open(image_path, 'rb') as f:
                files = {'file': (os.path.basename(image_path), f, content_type)}
                data = {'model_name': model_name}
                print(f"Uploading {image_path} to {model_name}...")
                response = requests.post(
                    f"{self.base_url}/upload_inference",
                    files=files,
                    data=data,
                    timeout=120
                )
            print(f"Status: {response.status_code}")
            if response.status_code == 200:
                result = response.json()
                print(f"Model used: {result['model_used']}")
                print("Top 3 predictions:")
                for i, pred in enumerate(result['predictions'][:3]):
                    print(f"  {i+1}. {pred['label']} ({pred['confidence']:.3f})")
                return result
            else:
                print(f"Error: {response.text}")
                return None
        except Exception as e:
            print(f"File upload prediction error: {e}")
            return None
    def predict_base64(self, image_path, model_name="oasis500m"):
        """
        Predict using base64 encoded image

        Args:
            image_path (str): Path to the image file
            model_name (str): Model to use ("oasis500m" or "vit-l-20")
        """
        if not os.path.exists(image_path):
            print(f"Image file not found: {image_path}")
            return None
        try:
            # Load and encode image; convert to RGB first because JPEG
            # cannot encode alpha channels (e.g., RGBA PNGs)
            image = Image.open(image_path).convert("RGB")
            buffer = io.BytesIO()
            image.save(buffer, format="JPEG")
            image_base64 = base64.b64encode(buffer.getvalue()).decode()
            print(f"Encoding {image_path} and sending to {model_name}...")
            response = requests.post(
                f"{self.base_url}/inference",
                json={
                    "image": image_base64,
                    "model_name": model_name
                },
                headers={"Content-Type": "application/json"},
                timeout=120
            )
            print(f"Status: {response.status_code}")
            if response.status_code == 200:
                result = response.json()
                print(f"Model used: {result['model_used']}")
                print("Top 3 predictions:")
                for i, pred in enumerate(result['predictions'][:3]):
                    print(f"  {i+1}. {pred['label']} ({pred['confidence']:.3f})")
                return result
            else:
                print(f"Error: {response.text}")
                return None
        except Exception as e:
            print(f"Base64 prediction error: {e}")
            return None
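
    # --- Sketch (an addition, not in the original client): fetch an image over
    # HTTP and send it through the same /inference endpoint without touching
    # disk. The JSON field names ("image", "model_name") match those used above. ---
    def predict_base64_from_url(self, image_url, model_name="oasis500m"):
        """Download an image and run base64 inference on it"""
        try:
            raw = requests.get(image_url, timeout=30).content
            # Re-encode via Pillow so non-JPEG sources are normalized to JPEG
            image = Image.open(io.BytesIO(raw)).convert("RGB")
            buffer = io.BytesIO()
            image.save(buffer, format="JPEG")
            image_base64 = base64.b64encode(buffer.getvalue()).decode()
            response = requests.post(
                f"{self.base_url}/inference",
                json={"image": image_base64, "model_name": model_name},
                timeout=120
            )
            return response.json() if response.status_code == 200 else None
        except Exception as e:
            print(f"URL prediction error: {e}")
            return None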
    def create_test_image(self, output_path="test_image.jpg"):
        """Create a simple test image for testing"""
        # Create a simple colored rectangle
        img = Image.new('RGB', (224, 224), color='red')
        img.save(output_path, format='JPEG')
        print(f"Test image created: {output_path}")
        return output_path
    def test_all_endpoints(self, image_path=None):
        """Test all endpoints with a given image or create a test image"""
        print("=" * 60)
        print("ChatGPT Oasis Model Inference API - Hugging Face Spaces Test")
        print("=" * 60)

        # Test health check
        print("\n1. Testing health check...")
        if not self.health_check():
            print("❌ Health check failed. Make sure your Space is running!")
            return

        # Test models list
        print("\n2. Testing models list...")
        self.list_models()

        # Use provided image or create test image
        if image_path is None:
            print("\n3. Creating test image...")
            image_path = self.create_test_image()
        else:
            print(f"\n3. Using provided image: {image_path}")

        # Test both models with file upload
        print("\n4. Testing file upload inference...")
        for model_name in ["oasis500m", "vit-l-20"]:
            print(f"\n--- Testing {model_name} with file upload ---")
            self.predict_file_upload(image_path, model_name)
            time.sleep(2)  # Small delay between requests

        # Test both models with base64
        print("\n5. Testing base64 inference...")
        for model_name in ["oasis500m", "vit-l-20"]:
            print(f"\n--- Testing {model_name} with base64 ---")
            self.predict_base64(image_path, model_name)
            time.sleep(2)  # Small delay between requests

        print("\n" + "=" * 60)
        print("✅ Test completed!")
def main():
    """Main function to run the test client"""
    # Replace with your actual Hugging Face Space URL
    SPACE_URL = "https://your-username-chatgpt-oasis.hf.space"

    # Initialize client
    client = HuggingFaceSpacesClient(SPACE_URL)

    # Test with a specific image if provided
    test_image = None  # Change this to a path like "your_image.jpg" if you have one

    # Run all tests
    client.test_all_endpoints(test_image)
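
# --- Sketch (an alternative entry point, not in the original script): read the
# Space URL and an optional image path from the command line instead of editing
# SPACE_URL above. Run as: python client.py <space_url> [image.jpg] ---
def main_cli():
    import sys
    if len(sys.argv) < 2:
        print("Usage: python client.py <space_url> [image_path]")
        return
    client = HuggingFaceSpacesClient(sys.argv[1])
    client.test_all_endpoints(sys.argv[2] if len(sys.argv) > 2 else None)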
if __name__ == "__main__":
    main()