#!/usr/bin/env python3
"""
Client for testing the ChatGPT Oasis Model Inference API deployed on Hugging Face Spaces
"""
import requests
import base64
import json
from PIL import Image
import io
import os
import time
class HuggingFaceSpacesClient:
    def __init__(self, space_url):
        """
        Initialize the client with your Hugging Face Space URL

        Args:
            space_url (str): Your Space URL (e.g., "https://your-username-chatgpt-oasis.hf.space")
        """
        self.base_url = space_url.rstrip('/')
    def health_check(self):
        """Check if the API is healthy and models are loaded"""
        try:
            response = requests.get(f"{self.base_url}/health", timeout=30)
            print(f"Health Check Status: {response.status_code}")
            print(f"Response: {json.dumps(response.json(), indent=2)}")
            return response.status_code == 200
        except Exception as e:
            print(f"Health check error: {e}")
            return False
    def list_models(self):
        """Get information about available models"""
        try:
            response = requests.get(f"{self.base_url}/models", timeout=30)
            print(f"Models Status: {response.status_code}")
            print(f"Available Models: {json.dumps(response.json(), indent=2)}")
            return response.json()
        except Exception as e:
            print(f"Models list error: {e}")
            return None
    def predict_file_upload(self, image_path, model_name="oasis500m"):
        """
        Predict using file upload

        Args:
            image_path (str): Path to the image file
            model_name (str): Model to use ("oasis500m" or "vit-l-20")
        """
        if not os.path.exists(image_path):
            print(f"Image file not found: {image_path}")
            return None
        try:
            # Keep the file handle open for the duration of the request
            with open(image_path, 'rb') as f:
                files = {'file': (os.path.basename(image_path), f, 'image/jpeg')}
                data = {'model_name': model_name}
                print(f"Uploading {image_path} to {model_name}...")
                response = requests.post(
                    f"{self.base_url}/upload_inference",
                    files=files,
                    data=data,
                    timeout=120
                )
            print(f"Status: {response.status_code}")
            if response.status_code == 200:
                result = response.json()
                print(f"Model used: {result['model_used']}")
                print("Top 3 predictions:")
                for i, pred in enumerate(result['predictions'][:3]):
                    print(f"  {i+1}. {pred['label']} ({pred['confidence']:.3f})")
                return result
            else:
                print(f"Error: {response.text}")
                return None
        except Exception as e:
            print(f"File upload prediction error: {e}")
            return None
    def predict_base64(self, image_path, model_name="oasis500m"):
        """
        Predict using base64 encoded image

        Args:
            image_path (str): Path to the image file
            model_name (str): Model to use ("oasis500m" or "vit-l-20")
        """
        if not os.path.exists(image_path):
            print(f"Image file not found: {image_path}")
            return None
        try:
            # Load and encode the image; convert to RGB so files with an
            # alpha channel (e.g. PNGs) can still be re-saved as JPEG
            image = Image.open(image_path).convert("RGB")
            buffer = io.BytesIO()
            image.save(buffer, format="JPEG")
            image_base64 = base64.b64encode(buffer.getvalue()).decode()
            print(f"Encoding {image_path} and sending to {model_name}...")
            response = requests.post(
                f"{self.base_url}/inference",
                json={
                    "image": image_base64,
                    "model_name": model_name
                },
                headers={"Content-Type": "application/json"},
                timeout=120
            )
            print(f"Status: {response.status_code}")
            if response.status_code == 200:
                result = response.json()
                print(f"Model used: {result['model_used']}")
                print("Top 3 predictions:")
                for i, pred in enumerate(result['predictions'][:3]):
                    print(f"  {i+1}. {pred['label']} ({pred['confidence']:.3f})")
                return result
            else:
                print(f"Error: {response.text}")
                return None
        except Exception as e:
            print(f"Base64 prediction error: {e}")
            return None
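    # Both predict helpers above assume the server returns JSON shaped roughly
    # like the sketch below (reconstructed from the fields this client reads;
    # the exact schema is defined by the Space, not by this file):
    #
    #   {
    #       "model_used": "oasis500m",
    #       "predictions": [
    #           {"label": "some_class", "confidence": 0.912},
    #           ...
    #       ]
    #   }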
    def create_test_image(self, output_path="test_image.jpg"):
        """Create a simple test image for testing"""
        # Create a simple colored rectangle
        img = Image.new('RGB', (224, 224), color='red')
        img.save(output_path, format='JPEG')
        print(f"Test image created: {output_path}")
        return output_path
    def test_all_endpoints(self, image_path=None):
        """Test all endpoints with a given image or create a test image"""
        print("=" * 60)
        print("ChatGPT Oasis Model Inference API - Hugging Face Spaces Test")
        print("=" * 60)

        # Test health check
        print("\n1. Testing health check...")
        if not self.health_check():
            print("❌ Health check failed. Make sure your Space is running!")
            return

        # Test models list
        print("\n2. Testing models list...")
        self.list_models()

        # Use provided image or create test image
        if image_path is None:
            print("\n3. Creating test image...")
            image_path = self.create_test_image()
        else:
            print(f"\n3. Using provided image: {image_path}")

        # Test both models with file upload
        print("\n4. Testing file upload inference...")
        for model_name in ["oasis500m", "vit-l-20"]:
            print(f"\n--- Testing {model_name} with file upload ---")
            self.predict_file_upload(image_path, model_name)
            time.sleep(2)  # Small delay between requests

        # Test both models with base64
        print("\n5. Testing base64 inference...")
        for model_name in ["oasis500m", "vit-l-20"]:
            print(f"\n--- Testing {model_name} with base64 ---")
            self.predict_base64(image_path, model_name)
            time.sleep(2)  # Small delay between requests

        print("\n" + "=" * 60)
        print("✅ Test completed!")
def main():
    """Main function to run the test client"""
    # Replace with your actual Hugging Face Space URL
    SPACE_URL = "https://your-username-chatgpt-oasis.hf.space"

    # Initialize client
    client = HuggingFaceSpacesClient(SPACE_URL)

    # Test with a specific image if provided
    test_image = None  # Change this to a path like "your_image.jpg" if you have one

    # Run all tests
    client.test_all_endpoints(test_image)


if __name__ == "__main__":
    main()
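# For a quick smoke test without this client, the upload route can also be
# exercised with curl (hypothetical URL and file name; route as assumed above):
#
#   curl -X POST -F "file=@test_image.jpg" -F "model_name=oasis500m" \
#        https://your-username-chatgpt-oasis.hf.space/upload_inference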