reach-vb (HF staff) committed
Commit fc3cb88 · 1 Parent(s): dec929a
Files changed (2)
  1. handler.py +36 -0
  2. requirements.txt +2 -0
handler.py ADDED
@@ -0,0 +1,36 @@
+ from typing import Dict, List, Any
+ from transformers import AutoProcessor, MusicgenForConditionalGeneration
+ import torch
+
+ class EndpointHandler:
+     def __init__(self, path="facebook/musicgen-large"):
+         # load the processor and the half-precision model from the repository path
+         self.processor = AutoProcessor.from_pretrained(path)
+         self.model = MusicgenForConditionalGeneration.from_pretrained(path, torch_dtype=torch.float16).to("cuda")
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+         """
+         Args:
+             data (Dict[str, Any]):
+                 The payload with the text prompt and generation parameters.
+         """
+         # split the payload into the text prompt and optional generation parameters
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", None)
+
+         # preprocess: tokenize the prompt and move the tensors to the GPU
+         inputs = self.processor(
+             text=[inputs],
+             padding=True,
+             return_tensors="pt").to("cuda")
+
+         # forward the prompt together with any generation kwargs from the payload
+         if parameters is not None:
+             outputs = self.model.generate(**inputs, **parameters)
+         else:
+             outputs = self.model.generate(**inputs)
+
+         # postprocess: move the generated waveform to CPU and convert it to numpy
+         prediction = outputs[0].cpu().numpy()
+
+         return [{"generated_audio": prediction}]
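For context, a minimal sketch of how this handler could be exercised locally. The payload shape mirrors what the handler expects ("inputs" plus optional "parameters"); the specific prompt and generation parameters shown are illustrative assumptions, not part of this commit.

# hypothetical local smoke test for the handler above; requires a CUDA GPU
from handler import EndpointHandler

handler = EndpointHandler(path="facebook/musicgen-large")

payload = {
    "inputs": "a chill lo-fi beat with soft piano",
    "parameters": {"do_sample": True, "guidance_scale": 3, "max_new_tokens": 256},
}

result = handler(payload)
audio = result[0]["generated_audio"]  # numpy array, shape (num_channels, num_samples)
print(audio.shape)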
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ transformers==4.31.0
+ accelerate>=0.20.3
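The handler returns the raw waveform as a numpy array, so writing it to an audio file is left to the caller. A sketch of that step, assuming scipy is available on the client side (it is not part of this requirements.txt) and the 32 kHz sampling rate that MusicGen checkpoints generate at:

# hypothetical client-side step: save a returned waveform to disk as a WAV file
import numpy as np
import scipy.io.wavfile

sampling_rate = 32000  # assumption: MusicGen checkpoints generate audio at 32 kHz
waveform = np.zeros((1, sampling_rate), dtype=np.float16)  # placeholder for result[0]["generated_audio"]
# scipy's WAV writer does not handle float16, so cast to float32 before writing
scipy.io.wavfile.write("musicgen_out.wav", rate=sampling_rate, data=waveform[0].astype(np.float32))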