Commit a47fd1a
Parent(s): ff9b931
Update handler.py

handler.py  CHANGED  (+32 -28)
@@ -2,37 +2,41 @@ from transformers import AutoProcessor, MusicgenForConditionalGeneration
 import torch
 
 class EndpointHandler:
-    def __init__(self,
-        # Load model and processor
-        self.processor = AutoProcessor.from_pretrained(
-        self.model = MusicgenForConditionalGeneration.from_pretrained(
+    def __init__(self, model_path="originstory/holisleigh", use_auth_token=None):
+        # Load model and processor with consistent path
+        self.processor = AutoProcessor.from_pretrained(model_path, use_auth_token=None)
+        self.model = MusicgenForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16, use_auth_token=use_auth_token).to("cuda")
 
     def __call__(self, data: dict) -> dict:
         """
         Args:
            data (dict): Contains the text prompt, vibe, style, and public domain song reference.
         """
-        [24 removed lines; the previous __call__ body is not shown in this view]
+        try:
+            # Extract user inputs
+            text_prompt = data.get("text_prompt")
+            vibe = data.get("vibe")
+            style = data.get("style")
+            song_reference = data.get("song_reference")
+
+            # Combine user inputs to form the complete prompt
+            combined_prompt = f"{vibe} {style} version of {song_reference}: {text_prompt}"
+
+            # Process the prompt
+            inputs = self.processor(text=[combined_prompt], padding=True, return_tensors="pt").to("cuda")
+
+            # Generate music
+            with torch.autocast("cuda"):
+                audio_output = self.model.generate(**inputs)
+
+            # Convert to suitable format
+            audio_data = audio_output[0].cpu().numpy().tolist()
+
+            # Return generated music
+            return {"generated_audio": audio_data}
+        except Exception as e:
+            # Handle errors
+            return {"error": str(e)}
+
+# Example usage
+handler = EndpointHandler()
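A minimal sketch of how the updated handler could be exercised locally, assuming a CUDA-capable machine and access to the originstory/holisleigh weights. Only the payload key names come from the data.get() calls in the diff; the values and the script itself are purely illustrative.

from handler import EndpointHandler

# Loads the processor and the fp16 model onto "cuda", per the new __init__
handler = EndpointHandler()

# Hypothetical payload; only the key names are taken from the diff
payload = {
    "text_prompt": "sleigh bells over a slow piano melody",
    "vibe": "cozy",
    "style": "lo-fi",
    "song_reference": "Jingle Bells",
}

result = handler(payload)
if "error" in result:
    print("Generation failed:", result["error"])
else:
    print("Returned", len(result["generated_audio"]), "channel(s) of audio samples")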
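If the endpoint's JSON response needs to be turned back into audio, the nested list in "generated_audio" can be converted to an array and written to a WAV file. This is a sketch under the assumption that the checkpoint decodes at MusicGen's usual 32 kHz; when the model object is available, the rate should instead be read from model.config.audio_encoder.sampling_rate.

import numpy as np
from scipy.io import wavfile

SAMPLING_RATE = 32_000  # assumption: standard MusicGen decode rate; read from the model config when possible

# audio_output[0] in the handler has shape (channels, samples), so the list round-trips to that shape
audio = np.array(result["generated_audio"], dtype=np.float32)
wavfile.write("generated.wav", SAMPLING_RATE, audio[0])  # write the first channel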