Update app.py
Browse files
app.py
CHANGED
|
@@ -54,8 +54,23 @@ class JarvisModels:
|
|
| 54 |
self.model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
|
| 55 |
|
| 56 |
async def generate_response(self, prompt):
|
| 57 |
-
|
| 58 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
|
| 60 |
async def transcribe_audio(self, audio_file):
|
| 61 |
input_audio, _ = torchaudio.load(audio_file)
|
|
|
|
| 54 |
self.model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
|
| 55 |
|
| 56 |
async def generate_response(self, prompt):
    """Generate a text response for *prompt* by streaming tokens from the inference client.

    The prompt is wrapped with the module-level system instructions and a
    ``[JARVIS]`` turn marker, then sent to ``self.client1.text_generation``
    in streaming mode.

    Args:
        prompt: The user's input text, appended to the system instructions.

    Returns:
        str: The concatenated text of all streamed tokens.
    """
    # Sampling parameters for the text-generation endpoint.
    # NOTE(review): seed=42 with do_sample=True makes sampling deterministic
    # across calls — confirm this is intended rather than a leftover debug value.
    generate_kwargs = dict(
        temperature=0.6,
        max_new_tokens=256,
        top_p=0.95,
        repetition_penalty=1,
        do_sample=True,
        seed=42,
    )
    formatted_prompt = system_instructions1 + prompt + "[JARVIS]"
    stream = self.client1.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
    # Join the streamed token texts in one pass instead of quadratic `+=` concatenation.
    return "".join(response.token.text for response in stream)
|
| 74 |
|
| 75 |
async def transcribe_audio(self, audio_file):
|
| 76 |
input_audio, _ = torchaudio.load(audio_file)
|