GueuleDange committed (verified)
Commit fbb1dd6 · Parent: 83bdff8

Update app.py

Files changed (1):
  1. app.py +22 -15
app.py CHANGED
@@ -1,30 +1,36 @@
 from fastapi import FastAPI, Request
 from fastapi.responses import StreamingResponse, HTMLResponse
-from fastapi.staticfiles import StaticFiles
 from fastapi.templating import Jinja2Templates
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 import asyncio
+import os
+
+# Create the static directory if it does not exist
+os.makedirs("static", exist_ok=True)
 
 app = FastAPI()
-app.mount("/static", StaticFiles(directory="static"), name="static")
 templates = Jinja2Templates(directory="templates")
 
-# Load the public model (no token)
+# Simplified configuration for Hugging Face Spaces
 model_name = "microsoft/Phi-3.5-mini-instruct"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(
-    model_name,
-    torch_dtype=torch.float16,
-    device_map="auto"
-)
+
+try:
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_name,
+        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+        device_map="auto"
+    )
+except Exception as e:
+    print(f"Model loading error: {str(e)}")
+    raise
 
 async def generate_response(prompt: str):
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-
-    # Token-by-token generation
-    with torch.no_grad():
-        for _ in range(512):  # Token limit
+    try:
+        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+        for _ in range(512):
             outputs = model.generate(
                 **inputs,
                 max_new_tokens=1,
@@ -35,8 +41,9 @@ async def generate_response(prompt: str):
             new_token = tokenizer.decode(outputs[0][-1], skip_special_tokens=True)
             yield f"data: {new_token}\n\n"
             await asyncio.sleep(0.05)
-
             inputs = {"input_ids": outputs}
+    except Exception as e:
+        yield f"data: [ERROR: {str(e)}]\n\n"
 
 @app.get("/", response_class=HTMLResponse)
 async def home(request: Request):
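
For context: generate_response yields Server-Sent-Events-formatted chunks (data: ...\n\n), and StreamingResponse is imported at the top of app.py, but the route that wires the generator to a response falls outside the hunks shown above. A minimal sketch of how such an endpoint is typically wired up in FastAPI follows; the /chat path, the prompt query parameter, and the stand-in generator body are assumptions for illustration, not taken from this commit.

# Sketch only: the /chat path and prompt parameter are hypothetical;
# this commit's hunks do not show the actual streaming route.
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

async def generate_response(prompt: str):
    # Stand-in for the model loop above: yields SSE-formatted chunks.
    for token in ("Hello", " ", "world"):
        yield f"data: {token}\n\n"

@app.get("/chat")
async def chat(prompt: str):
    # media_type "text/event-stream" tells clients to treat the body as SSE.
    return StreamingResponse(generate_response(prompt), media_type="text/event-stream")

With a sketch like this, something along the lines of curl -N "http://localhost:8000/chat?prompt=Hi" would print the data: chunks as they arrive (-N disables curl's output buffering).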