import gradio as gr
from llama_cpp import Llama
# Load the quantized YugoGPT model (4-bit GGUF) with llama-cpp-python
llm = Llama(
    model_path="yugogpt-q4_0.gguf",
    n_ctx=2048,       # context window size in tokens
    n_threads=4,      # CPU threads used for inference
    n_batch=512,      # prompt-processing batch size
    use_mlock=True,   # lock model pages in RAM to avoid swapping
    use_mmap=True     # memory-map the model file instead of loading it fully
)

def chat(message, history):
    # Serbian system prompt: "You are YugoGPT, an AI assistant that primarily
    # communicates in Serbian. Your task is to give detailed, truthful and
    # useful answers in Serbian."
    system_prompt = """Ti si YugoGPT, AI asistent koji prvenstveno komunicira na srpskom jeziku.
Tvoj zadatak je da daješ detaljne, istinite i korisne odgovore na srpskom jeziku."""

    # Plain-text prompt template; the ASSISTANT line seeds the reply in Serbian.
    full_prompt = f"""SYSTEM: {system_prompt}
USER: {message}
ASSISTANT: Dozvolite mi da vam odgovorim na srpskom jeziku.
"""
    # Run generation in streaming mode so the UI can update token by token.
    response = llm(
        full_prompt,
        max_tokens=2048,
        temperature=0.7,
        top_p=0.95,
        repeat_penalty=1.2,
        top_k=40,
        stop=["USER:", "\n\n"],  # stop at the next user turn or a blank line
        stream=True
    )
    # Accumulate streamed chunks and yield the growing reply so Gradio streams it.
    partial_message = ""
    for chunk in response:
        if chunk and chunk['choices'][0]['text']:
            partial_message += chunk['choices'][0]['text']
            yield partial_message

# Gradio chat UI. Title and description are in Serbian:
# "Ask a question in Serbian or any language of the former Yugoslavia."
demo = gr.ChatInterface(
    fn=chat,
    title="YugoGPT Asistent",
    description="Postavite pitanje na srpskom ili bilo kom jeziku bivše Jugoslavije.",
    examples=[
        "Objasni kvantno računarstvo",                             # "Explain quantum computing"
        "Koje su glavne karakteristike veštačke inteligencije?",   # "What are the main characteristics of artificial intelligence?"
        "Kako funkcioniše blockchain tehnologija?"                 # "How does blockchain technology work?"
    ]
)

if __name__ == "__main__":
    # Serve on all interfaces at port 7860, without a public Gradio share link.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )
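
# Usage sketch (assumptions: the dependencies are installed and the
# yugogpt-q4_0.gguf file sits next to this script, which is saved as app.py;
# the filename is only an example):
#
#   pip install gradio llama-cpp-python
#   python app.py
#
# The chat interface is then reachable at http://localhost:7860.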