Update app.py
app.py CHANGED
@@ -12,15 +12,15 @@ from transformers import pipeline # the huggingface framework
 
 app = FastAPI()
 
-#
+#deepneurones = pipeline("text2text-generation", model="google/flan-t5-small")
 #deepneurones = pipeline("automatic-speech-recognition") # the list of huggingface pipelines is available here: https://huggingface.co/docs/transformers/quicktour. pipeline() downloads the deep learning model into a local cache
-deepneurones= pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
-
+#deepneurones= pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") # a model has to be chosen
+deepneurones = Pipeline.from_pretrained("pyannote/speaker-diarization")
 @app.get("/healthcheck")
 def healthcheck():
 
-    #output =
-
+    #output = deepneurones(input)
+
     #pipeline("file.wav")
     return {"output":"OK"}
 @app.post("/stt")
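Note that `Pipeline.from_pretrained(...)` is the pyannote.audio API, not the `pipeline()` factory imported from transformers in the hunk header, so the file also needs `from pyannote.audio import Pipeline` (and the gated `pyannote/speaker-diarization` checkpoint may require a Hugging Face access token). Below is a minimal sketch of how the loaded models and the `/stt` route could fit together, assuming pyannote.audio is installed; the endpoint parameter names, the placeholder token and the temp-file handling are illustrative and not taken from the commit:

# Sketch only: names below are illustrative, not from the commit.
import tempfile

from fastapi import FastAPI, UploadFile
from pyannote.audio import Pipeline   # diarization pipeline (distinct from transformers.pipeline)
from transformers import pipeline     # the huggingface framework

app = FastAPI()

# speech-to-text model (the wav2vec2 line commented out in the diff)
stt = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

# speaker diarization; the gated checkpoint may require a Hugging Face token (assumption)
deepneurones = Pipeline.from_pretrained("pyannote/speaker-diarization",
                                        use_auth_token="hf_xxx")  # placeholder token

@app.get("/healthcheck")
def healthcheck():
    return {"output": "OK"}

@app.post("/stt")
async def stt_route(file: UploadFile):
    # both libraries accept a file path, so write the upload to a temporary .wav first
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
        tmp.write(await file.read())
        path = tmp.name
    transcript = stt(path)                 # {"text": "..."}
    diarization = deepneurones(path)       # pyannote.core.Annotation
    speakers = [
        {"start": round(turn.start, 2), "end": round(turn.end, 2), "speaker": speaker}
        for turn, _, speaker in diarization.itertracks(yield_label=True)
    ]
    return {"text": transcript["text"], "speakers": speakers}

As in the commit, both models are loaded once at import time rather than per request; the temporary file only exists because both libraries take a file path as input.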