alibidaran committed on
Commit 0a66e69 · verified · 1 Parent(s): 4609b38

Upload 3 files

Files changed (3)
  1. DockerFile +31 -0
  2. app.py +30 -0
  3. start_services.sh +9 -0
DockerFile ADDED
@@ -0,0 +1,25 @@
+ # Base image for Python
+ FROM python:3.10-slim
+
+ # Install system dependencies needed by the Ollama installer
+ RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
+
+ # Install the Ollama server using the official install script
+ RUN curl -fsSL https://ollama.com/install.sh | sh
+
+ # Set working directory for the app
+ WORKDIR /app
+
+ # Copy application files (model and start script included) into the container
+ COPY . /app
+
+ # Install Python dependencies
+ RUN pip install --no-cache-dir gradio requests
+
+ # Expose ports for Gradio and Ollama
+ EXPOSE 7860 11434
+
+ # Make the startup script executable
+ RUN chmod +x /app/start_services.sh
+
+ CMD ["/app/start_services.sh"]
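
To try the image locally, something along these lines should work; the virtual-doctor tag is an illustrative name, not part of the commit:

    docker build -t virtual-doctor .
    docker run -p 7860:7860 -p 11434:11434 virtual-doctor

Publishing both ports exposes the Gradio UI on 7860 and the Ollama API on 11434.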
app.py ADDED
@@ -0,0 +1,34 @@
+ import gradio as gr
+ import requests
+
+ # Custom model pulled in start_services.sh
+ MODEL_NAME = "hf.co/alibidaran/LLAMA3.2-Virtual_doctor_GGUF"
+
+ # Function to interact with the Ollama backend
+ def chat_with_ollama(message):
+     try:
+         # Ollama server running on localhost:11434
+         ollama_endpoint = "http://localhost:11434/api/generate"
+         # /api/generate expects "model" and "prompt"; stream=False returns one JSON object
+         payload = {"model": MODEL_NAME, "prompt": message, "stream": False}
+
+         response = requests.post(ollama_endpoint, json=payload)
+         response_data = response.json()
+
+         if response.status_code == 200:
+             return response_data.get("response", "No response from Ollama.")
+         else:
+             return f"Error: {response_data.get('error', 'Unknown error')}"
+     except Exception as e:
+         return f"An error occurred: {str(e)}"
+
+ # Gradio interface
+ iface = gr.Interface(
+     fn=chat_with_ollama,
+     inputs=gr.Textbox(lines=2, placeholder="Type your message..."),
+     outputs="text",
+     title="Gradio Chatbot with Ollama Backend",
+ )
+
+ if __name__ == "__main__":
+     iface.launch(server_name="0.0.0.0", server_port=7860)
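
For reference, the request app.py sends corresponds to this curl call against Ollama's /api/generate endpoint; with "stream": false the server replies with a single JSON object whose "response" field holds the generated text. A sketch assuming the model has already been pulled:

    curl http://localhost:11434/api/generate \
      -d '{"model": "hf.co/alibidaran/LLAMA3.2-Virtual_doctor_GGUF", "prompt": "Hello", "stream": false}'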
start_services.sh ADDED
@@ -0,0 +1,12 @@
+ #!/bin/bash
+
+ # Start the Ollama server in the background (OLLAMA_HOST sets the bind address
+ # and port; ollama serve has no --port flag)
+ OLLAMA_HOST=0.0.0.0:11434 ollama serve &
+ # Give the server a moment to start accepting requests
+ sleep 5
+ # Pull the custom model so it is available to /api/generate
+ ollama pull hf.co/alibidaran/LLAMA3.2-Virtual_doctor_GGUF
+ ollama list
+ # Start the Gradio chatbot
+ python app.py
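
A quick smoke test once the container is up (illustrative commands, not part of the commit): Ollama's /api/tags endpoint lists the pulled models, and the Gradio UI should answer on port 7860:

    curl http://localhost:11434/api/tags
    curl -I http://localhost:7860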