hadadrjt committed on
Commit
dd61af5
·
0 Parent(s):

gemma-3-270m: Initial.

Browse files
Files changed (5) hide show
  1. .gitattributes +36 -0
  2. Dockerfile +22 -0
  3. README.md +73 -0
  4. app.py +71 -0
  5. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.jpg filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#
# SPDX-FileCopyrightText: Hadad <[email protected]>
# SPDX-License-Identifier: Apache-2.0
#

# Pin the base image: `latest` is a moving target and can silently break
# rebuilds when a new Python major/minor version is published.
FROM python:3.12-slim

# Set the main working directory inside the container
WORKDIR /app

# Copy only the dependency manifest first so the pip layer is cached and
# re-run only when requirements.txt changes, not on every code edit.
COPY requirements.txt .

# Install all dependencies; --no-cache-dir keeps the image smaller
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code (changes here no longer invalidate the pip layer)
COPY . .

# Open the port so the app can be accessed
EXPOSE 7860

# Start the app
CMD ["python", "app.py"]
README.md ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: gemma-3-270m
3
+ short_description: Gemma 3 (270M) runs on Ollama using a single-core CPU only
4
+ license: gemma
5
+ emoji: ⚡
6
+ colorFrom: red
7
+ colorTo: red
8
+ sdk: docker
9
+ app_port: 7860
10
+ pinned: false
11
+ # Used to promote this Hugging Face Space
12
+ models:
13
+ - hadadrjt/JARVIS
14
+ - agentica-org/DeepCoder-14B-Preview
15
+ - agentica-org/DeepSWE-Preview
16
+ - fka/awesome-chatgpt-prompts
17
+ - black-forest-labs/FLUX.1-Kontext-dev
18
+ - ChatDOC/OCRFlux-3B
19
+ - deepseek-ai/DeepSeek-R1
20
+ - deepseek-ai/DeepSeek-R1-0528
21
+ - deepseek-ai/DeepSeek-R1-Distill-Llama-70B
22
+ - deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
23
+ - deepseek-ai/DeepSeek-R1-0528-Qwen3-8B
24
+ - deepseek-ai/DeepSeek-V3-0324
25
+ - google/gemma-3-1b-it
26
+ - google/gemma-3-27b-it
27
+ - google/gemma-3-4b-it
28
+ - google/gemma-3n-E4B-it
29
+ - google/gemma-3n-E4B-it-litert-preview
30
+ - google/medsiglip-448
31
+ - kyutai/tts-1.6b-en_fr
32
+ - meta-llama/Llama-3.1-8B-Instruct
33
+ - meta-llama/Llama-3.2-3B-Instruct
34
+ - meta-llama/Llama-3.3-70B-Instruct
35
+ - meta-llama/Llama-4-Maverick-17B-128E-Instruct
36
+ - meta-llama/Llama-4-Scout-17B-16E-Instruct
37
+ - microsoft/Phi-4-mini-instruct
38
+ - mistralai/Devstral-Small-2505
39
+ - mistralai/Mistral-Small-3.1-24B-Instruct-2503
40
+ - openai/webgpt_comparisons
41
+ - openai/whisper-large-v3-turbo
42
+ - openai/gpt-oss-120b
43
+ - openai/gpt-oss-20b
44
+ - Qwen/QwQ-32B
45
+ - Qwen/Qwen2.5-VL-32B-Instruct
46
+ - Qwen/Qwen2.5-VL-3B-Instruct
47
+ - Qwen/Qwen2.5-VL-72B-Instruct
48
+ - Qwen/Qwen3-235B-A22B
49
+ - THUDM/GLM-4.1V-9B-Thinking
50
+ - tngtech/DeepSeek-TNG-R1T2-Chimera
51
+ - moonshotai/Kimi-K2-Instruct
52
+ - Qwen/Qwen3-235B-A22B-Instruct-2507
53
+ - Qwen/Qwen3-Coder-480B-A35B-Instruct
54
+ - Qwen/Qwen3-235B-A22B-Thinking-2507
55
+ - zai-org/GLM-4.5
56
+ - zai-org/GLM-4.5-Air
57
+ - zai-org/GLM-4.5V
58
+ - deepseek-ai/DeepSeek-V3.1
59
+ - deepseek-ai/DeepSeek-V3.1-Base
60
+ - microsoft/VibeVoice-1.5B
61
+ - xai-org/grok-2
62
+ - Qwen/Qwen-Image-Edit
63
+ - ByteDance-Seed/Seed-OSS-36B-Instruct
64
+ - google/gemma-3-270m
65
+ - google/gemma-3-270m-it
66
+ - openbmb/MiniCPM-V-4_5
67
+ - tencent/Hunyuan-MT-7B
68
+ - meituan-longcat/LongCat-Flash-Chat
69
+ - Phr00t/WAN2.2-14B-Rapid-AllInOne
70
+ # Used to promote this Hugging Face Space
71
+ datasets:
72
+ - fka/awesome-chatgpt-prompts
73
+ ---
app.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#
# SPDX-FileCopyrightText: Hadad <[email protected]>
# SPDX-License-Identifier: Apache-2.0
#

"""Gradio front-end for Gemma 3 (270M) served by Ollama through an
OpenAI-compatible endpoint.

Flat script: reads the endpoint configuration from the environment,
builds the UI (sidebar description + chat interface), then launches it.
"""

import os  # Read endpoint URL and API key from environment variables
import gradio as gr  # Used to create the user interface

# Fail fast with an actionable message instead of letting gr.load_chat
# receive a None URL and fail obscurely at startup.
ollama_base_url = os.getenv("OLLAMA_API_BASE_URL")
if not ollama_base_url:
    raise RuntimeError("OLLAMA_API_BASE_URL environment variable is not set")

# Gradio user interface
with gr.Blocks(
    fill_height=True,  # Adjusting to the height of the user's screen
    fill_width=True    # Adjusting to the width of the user's screen
) as app:
    # Sidebar
    with gr.Sidebar():
        # Project description
        gr.HTML(
            """
            This Space runs the <b><a href=
            "https://huggingface.co/google/gemma-3-270m"
            target="_blank">Gemma 3 (270M)</a></b> model from
            <b>Google</b>, hosted on a server using <b>Ollama</b> and
            accessed via <b>OpenAI-Style inference.</b><br><br>
            Official <b>documentation</b> for using Ollama with
            OpenAI-Style inference can be found
            <b><a href="https://ollama.com/blog/openai-compatibility"
            target="_blank">here</a></b>.<br><br>
            Gemma 3 (270M) runs entirely on <b>CPU</b>, utilizing only a
            <b>single core</b>. This is sufficient due to the small
            size of the model, which makes it possible to operate
            efficiently on minimal hardware.<br><br>
            The Gemma 3 (270M) model can be viewed or downloaded from
            the official Ollama website,
            <b><a href="https://ollama.com/library/gemma3:270m"
            target="_blank">here</a></b>.<br><br>
            Gemma 3 has multimodal capabilities. However, running on
            CPU with a small number of parameters may limit its
            understanding of context. For this reason, the
            upload functionality has been disabled.<br><br>
            <b>Like this project? Feel free to buy me a <a href=
            "https://ko-fi.com/hadad" target="_blank">
            coffee</a></b>.
            """
        )
    # Chat interface backed by the OpenAI-compatible Ollama endpoint
    gr.load_chat(
        ollama_base_url,                    # Endpoint (validated above)
        token=os.getenv("OLLAMA_API_KEY"),  # API key (None if the server is open)
        model="gemma3:270m",                # Model
        chatbot=gr.Chatbot(
            label="Ollama | Gemma 3 (270M)",  # Chatbot title
            type="messages",                  # OpenAI-style messages format
            show_copy_button=True,            # Allow users to copy responses
            scale=1                           # Standard display scaling
        ),
        file_types=None,  # Disable multimodal uploads (text-only here)
        examples=[
            ["Please introduce yourself."],
            ["What caused World War II?"],
            ["Give me a short introduction to large language model."],
            ["Explain about quantum computers."]
        ],  # Provide sample inputs for users to try
        cache_examples=False,  # Ensure responses are always fresh
        show_api=False         # Disable Gradio API
    )

# Start the app; bind to all interfaces so the containerized server
# is reachable from outside (Dockerfile exposes port 7860).
app.launch(
    server_name="0.0.0.0",  # Listen on all network interfaces
    pwa=True                # Progressive Web App
)
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ gradio[oauth,mcp]
2
+ openai