Update app.py
app.py CHANGED
@@ -10,12 +10,14 @@ from tqdm import tqdm
 from dotenv import load_dotenv
 from pydantic import BaseModel
 import asyncio
+from huggingface_hub import login
 
 load_dotenv()
 os.system("pip install --upgrade llama-cpp-python")
 
 app = FastAPI()
 HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
+login(token=HUGGINGFACE_TOKEN)
 
 global_data = {
     'model_configs': [
@@ -123,6 +125,7 @@ iface = gr.Interface(
     live=False
 )
 
+@spaces.GPU()
 if __name__ == "__main__":
     port = int(os.environ.get("PORT", 7860))
     iface.launch(server_port=port)
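A minimal sketch of how these two additions are typically wired in a ZeroGPU Space, assuming the spaces package is available to app.py: login() authenticates the process with the token read from the environment so gated or private repos can be downloaded, and @spaces.GPU() decorates a callable that needs GPU access. Note that a Python decorator must sit directly above a def or class statement, so placing @spaces.GPU() above if __name__ == "__main__": as in the second hunk will not parse. The generate function below is illustrative, not taken from app.py.

import os

import spaces                      # Hugging Face ZeroGPU helper (assumed imported in app.py)
from dotenv import load_dotenv
from huggingface_hub import login

load_dotenv()
# Authenticate once at startup; HUGGINGFACE_TOKEN comes from the Space secrets or a local .env file.
login(token=os.getenv("HUGGINGFACE_TOKEN"))

@spaces.GPU()  # request a GPU only while this function runs
def generate(prompt: str) -> str:
    # Illustrative stand-in for the llama-cpp-python inference the app performs.
    return f"echo: {prompt}"

if __name__ == "__main__":
    print(generate("hello"))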