lyangas's picture
init repo
b269c5d
raw
history blame
2.11 kB
#!/usr/bin/env python3
"""
Main file for launching LLM Structured Output application in Docker
"""
import argparse
import threading
import time
from config import Config
def run_gradio():
    """Start the Gradio web UI and block until it shuts down.

    Imports lazily so the heavy `app` module is only loaded when this
    mode is actually selected. Host/port come from `Config`.
    """
    from app import create_gradio_interface

    print(f"🎨 Starting Gradio interface at http://{Config.HOST}:{Config.GRADIO_PORT}")
    interface = create_gradio_interface()
    # Production settings: no public share link, no debug reloader.
    launch_options = {
        "server_name": Config.HOST,
        "server_port": Config.GRADIO_PORT,
        "share": False,
        "debug": False,
    }
    interface.launch(**launch_options)
def run_api():
    """Start the FastAPI server with uvicorn and block until it exits.

    Imports are local so uvicorn/FastAPI are only required when the API
    mode is actually selected. Host/port come from `Config`.
    """
    import uvicorn
    from api import api_app

    # Banner emoji was mojibake ("πŸ”Œ" — UTF-8 bytes mis-decoded); restored to 🔌.
    print(f"🔌 Starting API at http://{Config.HOST}:{Config.API_PORT}")
    uvicorn.run(
        api_app,
        host=Config.HOST,
        port=Config.API_PORT,
        log_level="info",
    )
def run_both(startup_delay: float = 2.0):
    """Run both services: the API in a daemon thread, Gradio in the main thread.

    Args:
        startup_delay: Seconds to wait after starting the API thread before
            launching Gradio, giving uvicorn time to bind its port. Defaults
            to the original hard-coded 2 seconds.

    The API thread is a daemon so it dies with the process when Gradio exits.
    """
    # Banner emojis were mojibake ("πŸš€", "πŸ“Š", "πŸ”Œ", "πŸ“–"); restored.
    print("🚀 Starting LLM Structured Output application...")
    print("=" * 60)
    print(f"📊 Gradio interface: http://{Config.HOST}:{Config.GRADIO_PORT}")
    print(f"🔌 API: http://{Config.HOST}:{Config.API_PORT}")
    print(f"📖 API documentation: http://{Config.HOST}:{Config.API_PORT}/docs")
    print("=" * 60)

    # Start API in a background daemon thread.
    api_thread = threading.Thread(target=run_api, daemon=True)
    api_thread.start()

    # Small delay so the API can start binding before Gradio takes over
    # the main thread (Gradio must run in the main thread).
    time.sleep(startup_delay)
    run_gradio()
def main():
    """Parse the command line and dispatch to the selected launch mode."""
    parser = argparse.ArgumentParser(description="LLM Structured Output application")
    parser.add_argument(
        "--mode",
        choices=["gradio", "api", "both"],
        default="gradio",  # Default to gradio only for HuggingFace Spaces
        help="Launch mode: gradio (interface only), api (API only), both (both services)"
    )
    args = parser.parse_args()

    # Dispatch table replaces the if/elif chain; "both" is the fallback.
    launchers = {
        "gradio": run_gradio,
        "api": run_api,
    }
    launchers.get(args.mode, run_both)()
# Standard script entry guard: only run when executed directly, not on import.
if __name__ == "__main__":
    main()