File size: 810 Bytes
802a407
55b26e3
 
802a407
55b26e3
802a407
c262d21
 
ec49f72
c262d21
 
 
 
 
 
 
 
802a407
55b26e3
802a407
 
 
46f8fda
e6c35a2
802a407
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
import os
import subprocess
import sys

import gradio as gr

# Install the ollama client at startup (e.g. on a hosted Space where
# dependencies are not pre-installed).  Calling pip via sys.executable with an
# argument list guarantees the package lands in the interpreter running this
# script and avoids a shell-string invocation; check_call raises if the
# install fails instead of silently continuing to a broken import below.
subprocess.check_call([sys.executable, "-m", "pip", "install", "ollama"])

from ollama import chat

def chat_with_ollama(prompt):
    """Stream a chat completion for *prompt* from the local Ollama server.

    Args:
        prompt: The user's message; sent as a single user-role message.

    Yields:
        str: The accumulated response text after each streamed chunk, so a
        Gradio Textbox output updates incrementally as tokens arrive.
    """
    stream = chat(
        model='hf.co/mlabonne/Meta-Llama-3.1-8B-Instruct-abliterated-GGUF:Q2_K',
        # Pass the prompt directly; the original f'{prompt}' wrapper was a
        # redundant identity conversion for the str Gradio supplies.
        messages=[{'role': 'user', 'content': prompt}],
        stream=True,
    )
    # Collect chunks in a list and join per yield instead of repeated
    # `output += ...` string concatenation.
    pieces = []
    for chunk in stream:
        text = chunk['message']['content']
        # Mirror the stream to stdout so the server log shows the response.
        print(text, end='', flush=True)
        pieces.append(text)
        yield ''.join(pieces)

    
# Assemble the web UI: one free-text prompt in, one streamed text reply out.
prompt_box = gr.Textbox(label="Enter your prompt")
response_box = gr.Textbox(label="Response from Ollama")

iface = gr.Interface(
    fn=chat_with_ollama,
    inputs=prompt_box,
    outputs=response_box,
    title="Ollama Chatbot Client",
    description="A Gradio client to interact with the Ollama server.",
)

# Start the Gradio HTTP server; blocks until the process is stopped.
iface.launch()