from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.responses import JSONResponse, RedirectResponse
from tempfile import NamedTemporaryFile
import whisper
import torch
from typing import List

# Check whether an NVIDIA GPU is available; uncomment the DEVICE line and the
# GPU-aware load_model call below to run Whisper on the GPU when one is present.
#DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load the Whisper model:
#model = whisper.load_model("base", device=DEVICE)
model = whisper.load_model("base")
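# Other sizes ("tiny", "small", "medium", "large") trade transcription accuracy
# for speed and memory; "base" is a reasonable default for a demo.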

app = FastAPI()

@app.post("/whisper/")
async def handler(files: List[UploadFile] = File(...)):
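    # Note: model.transcribe() is blocking, so requests to this endpoint are
    # handled one at a time; a heavier deployment might offload transcription
    # to a threadpool or background worker.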
    if not files:
        raise HTTPException(status_code=400, detail="No files were provided")

    # For each file, let's store the results in a list of dictionaries.
    results = []

    for file in files:
        # Create a temporary file.
        with NamedTemporaryFile(delete=True) as temp:
            # Write the user's uploaded file to the temporary file and flush it,
            # so the full audio is on disk before Whisper reads it.
            temp.write(file.file.read())
            temp.flush()

            # Let's get the transcript of the temporary file.
            result = model.transcribe(temp.name)

            # Now we can store the result object for this file.
            results.append({
                'filename': file.filename,
                'transcript': result['text'],
            })

    return JSONResponse(content={'results': results})


@app.get("/", response_class=RedirectResponse)
async def redirect_to_docs():
    return "/docs"