oncu committed on
Commit
223ed76
·
verified ·
1 Parent(s): ab15e90

Upload 7 files

Browse files
Files changed (8) hide show
  1. .gitattributes +1 -0
  2. Dockerfile +65 -0
  3. README.md +14 -7
  4. app.py +441 -0
  5. docker-compose.yml +16 -0
  6. error.png +3 -0
  7. groups_merged.txt +0 -0
  8. start.sh +21 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ error.png filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# CUDA devel image: provides nvcc + cuDNN needed to build llama.cpp with GGML_CUDA=ON.
FROM nvidia/cuda:12.8.0-cudnn-devel-ubuntu24.04

# Non-interactive apt so the build never blocks on prompts.
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y --no-install-recommends --fix-missing \
    git \
    git-lfs \
    wget \
    curl \
    cmake \
    # python build dependencies \
    build-essential \
    libssl-dev \
    zlib1g-dev \
    libbz2-dev \
    libreadline-dev \
    libsqlite3-dev \
    libncursesw5-dev \
    xz-utils \
    tk-dev \
    libxml2-dev \
    libxmlsec1-dev \
    libffi-dev \
    liblzma-dev \
    ffmpeg \
    nvidia-driver-570

# Check if user with UID 1000 exists, if not create it
# (HF Spaces convention: the app runs as the unprivileged UID-1000 user.)
RUN id -u 1000 &>/dev/null || useradd -m -u 1000 user
USER 1000
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:${PATH}
WORKDIR ${HOME}/app

# Build Python from source via pyenv (the long apt list above supplies its build deps).
RUN curl https://pyenv.run | bash
ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
ARG PYTHON_VERSION=3.11
RUN pyenv install ${PYTHON_VERSION} && \
    pyenv global ${PYTHON_VERSION} && \
    pyenv rehash && \
    pip install --no-cache-dir -U pip setuptools wheel && \
    pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.8" "APScheduler"

# App sources, then llama.cpp (its requirements cover the HF->GGUF conversion script).
COPY --chown=1000 . ${HOME}/app
RUN git clone https://github.com/ggerganov/llama.cpp
RUN pip install -r llama.cpp/requirements.txt

# Fallback calibration dataset used by app.py when no training file is uploaded.
COPY groups_merged.txt ${HOME}/app/llama.cpp/

ENV PYTHONPATH=${HOME}/app \
    PYTHONUNBUFFERED=1 \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    GRADIO_ALLOW_FLAGGING=never \
    GRADIO_NUM_PORTS=1 \
    GRADIO_SERVER_NAME=0.0.0.0 \
    GRADIO_THEME=huggingface \
    TQDM_POSITION=-1 \
    TQDM_MININTERVAL=1 \
    SYSTEM=spaces \
    LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
    PATH=/usr/local/nvidia/bin:${PATH}

# start.sh compiles the llama.cpp helper binaries, then launches app.py.
ENTRYPOINT /bin/bash start.sh
65
+
README.md CHANGED
@@ -1,12 +1,19 @@
1
  ---
2
- title: Gguf
3
- emoji: 📊
4
- colorFrom: green
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 5.21.0
8
- app_file: app.py
 
 
 
9
  pinned: false
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
1
  ---
2
+ title: GGUF My Repo
3
+ emoji: 🦙
4
+ colorFrom: gray
5
+ colorTo: pink
6
+ sdk: docker
7
+ hf_oauth: true
8
+ hf_oauth_scopes:
9
+ - read-repos
10
+ - write-repos
11
+ - manage-repos
12
  pinned: false
13
  ---
14
 
15
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
16
+
17
+ To run this space locally:
18
+ 1. Login huggingface CLI: `huggingface-cli login`
19
+ 2. Run command: `HF_TOKEN=$(cat ~/.cache/huggingface/token) docker compose up`
app.py ADDED
@@ -0,0 +1,441 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import subprocess
3
+ import signal
4
+ os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
5
+ import gradio as gr
6
+ import tempfile
7
+
8
+ from huggingface_hub import HfApi, ModelCard, whoami
9
+ from gradio_huggingfacehub_search import HuggingfaceHubSearch
10
+ from pathlib import Path
11
+ from textwrap import dedent
12
+ from apscheduler.schedulers.background import BackgroundScheduler
13
+
14
+
15
# used for restarting the space
HF_TOKEN = os.environ.get("HF_TOKEN")  # token with rights to restart the Space (may be None locally)
CONVERSION_SCRIPT = "./llama.cpp/convert_hf_to_gguf.py"  # llama.cpp HF -> GGUF converter, cloned at build time
18
+
19
# escape HTML for logging
def escape(s: str) -> str:
    """Escape *s* so it can be embedded safely in HTML log/error output.

    Ampersands are replaced first (otherwise the ``&`` introduced by the
    later entities would be double-escaped).  Newlines become ``<br/>``
    so multi-line error messages keep their line breaks when rendered.
    """
    # BUG FIX: the previous code read s.replace("&", "&") — a no-op that
    # left "&" unescaped (the "&amp;" entity was lost in rendering).
    s = s.replace("&", "&amp;")  # Must be done first!
    s = s.replace("<", "&lt;")
    s = s.replace(">", "&gt;")
    s = s.replace('"', "&quot;")
    s = s.replace("\n", "<br/>")
    return s
27
+
28
def generate_importance_matrix(model_path: str, train_data_path: str, output_path: str):
    """Run llama-imatrix to compute an importance matrix for quantization.

    Args:
        model_path: Path to the fp16 GGUF model to analyse.
        train_data_path: Calibration text file fed to llama-imatrix.
        output_path: Destination file for the imatrix data.

    Raises:
        Exception: If ``model_path`` does not exist.
    """
    imatrix_command = [
        "./llama.cpp/llama-imatrix",
        "-m", model_path,
        "-f", train_data_path,
        "-ngl", "99",
        "--output-frequency", "10",
        "-o", output_path,
    ]

    if not os.path.isfile(model_path):
        raise Exception(f"Model file not found: {model_path}")

    print("Running imatrix command...")
    process = subprocess.Popen(imatrix_command, shell=False)

    try:
        # NOTE(review): 60s is a hard cap on the whole imatrix run; larger
        # models will routinely hit it — confirm this limit is intentional.
        process.wait(timeout=60)  # added wait
    except subprocess.TimeoutExpired:
        # SIGINT first so llama-imatrix can flush partial results gracefully.
        print("Imatrix computation timed out. Sending SIGINT to allow graceful termination...")
        process.send_signal(signal.SIGINT)
        try:
            process.wait(timeout=5)  # grace period
        except subprocess.TimeoutExpired:
            # Fixed garbled log message ("Forecfully terming").
            print("Imatrix process still didn't terminate. Forcefully killing process...")
            process.kill()

    print("Importance matrix generation completed.")
56
+
57
def split_upload_model(model_path: str, outdir: str, repo_id: str, oauth_token: gr.OAuthToken | None, split_max_tensors=256, split_max_size=None):
    """Shard a GGUF model with llama-gguf-split and upload every shard to the Hub.

    Args:
        model_path: Path to the single-file GGUF model; it is deleted after splitting.
        outdir: Directory where the shard files are written (and searched for).
        repo_id: Target Hub repository for the uploads.
        oauth_token: Gradio OAuth token of the logged-in user.
        split_max_tensors: Max tensors per shard (used when no size limit is given).
        split_max_size: Optional max shard size (e.g. "256M", "5G"); takes precedence.

    Raises:
        ValueError: If the user is not logged in.
        Exception: If splitting fails, no shards are produced, or an upload fails.
    """
    print(f"Model path: {model_path}")
    print(f"Output dir: {outdir}")

    if oauth_token is None or oauth_token.token is None:
        raise ValueError("You have to be logged in.")

    split_cmd = [
        "./llama.cpp/llama-gguf-split",
        "--split",
    ]
    # Size limit takes precedence over the tensor-count limit.
    if split_max_size:
        split_cmd.append("--split-max-size")
        split_cmd.append(split_max_size)
    else:
        split_cmd.append("--split-max-tensors")
        split_cmd.append(str(split_max_tensors))

    # args for output
    model_path_prefix = '.'.join(model_path.split('.')[:-1])  # remove the file extension
    split_cmd.append(model_path)
    split_cmd.append(model_path_prefix)

    print(f"Split command: {split_cmd}")

    result = subprocess.run(split_cmd, shell=False, capture_output=True, text=True)
    print(f"Split command stdout: {result.stdout}")
    print(f"Split command stderr: {result.stderr}")

    if result.returncode != 0:
        # BUG FIX: with text=True, result.stderr is already a str; the old
        # result.stderr.decode("utf-8") raised AttributeError and masked the
        # real llama-gguf-split error.
        raise Exception(f"Error splitting the model: {result.stderr}")
    print("Model split successfully!")

    # remove the original model file if needed
    if os.path.exists(model_path):
        os.remove(model_path)

    model_file_prefix = model_path_prefix.split('/')[-1]
    print(f"Model file name prefix: {model_file_prefix}")
    sharded_model_files = [f for f in os.listdir(outdir) if f.startswith(model_file_prefix) and f.endswith(".gguf")]
    if sharded_model_files:
        print(f"Sharded model files: {sharded_model_files}")
        api = HfApi(token=oauth_token.token)
        for file in sharded_model_files:
            file_path = os.path.join(outdir, file)
            print(f"Uploading file: {file_path}")
            try:
                api.upload_file(
                    path_or_fileobj=file_path,
                    path_in_repo=file,
                    repo_id=repo_id,
                )
            except Exception as e:
                raise Exception(f"Error uploading file {file_path}: {e}")
    else:
        raise Exception("No sharded files found.")

    print("Sharded model has been uploaded successfully!")
116
+
117
def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_repo, train_data_file, split_model, split_max_tensors, split_max_size, oauth_token: gr.OAuthToken | None):
    """Download a Hub model, convert it to GGUF, quantize it, and upload the result.

    Pipeline: snapshot-download -> convert_hf_to_gguf (fp16) -> optional
    imatrix generation -> llama-quantize -> create target repo -> model card
    -> upload (sharded via split_upload_model, or as a single file).

    Returns a (html_message, image_path) tuple for the Gradio outputs; on any
    failure the exception text is HTML-escaped and returned with "error.png".
    """
    if oauth_token is None or oauth_token.token is None:
        raise gr.Error("You must be logged in to use GGUF-my-repo")

    # validate the oauth token
    try:
        whoami(oauth_token.token)
    except Exception as e:
        raise gr.Error("You must be logged in to use GGUF-my-repo")

    model_name = model_id.split('/')[-1]

    try:
        api = HfApi(token=oauth_token.token)

        # Always fetch metadata/config files; weights pattern decided below.
        dl_pattern = ["*.md", "*.json", "*.model"]

        # Prefer safetensors weights when the source repo has any; else .bin.
        pattern = (
            "*.safetensors"
            if any(
                file.path.endswith(".safetensors")
                for file in api.list_repo_tree(
                    repo_id=model_id,
                    recursive=True,
                )
            )
            else "*.bin"
        )

        dl_pattern += [pattern]

        if not os.path.exists("downloads"):
            os.makedirs("downloads")

        if not os.path.exists("outputs"):
            os.makedirs("outputs")

        # Both temp dirs are auto-deleted on exit, even on error.
        with tempfile.TemporaryDirectory(dir="outputs") as outdir:
            fp16 = str(Path(outdir)/f"{model_name}.fp16.gguf")

            with tempfile.TemporaryDirectory(dir="downloads") as tmpdir:
                # Keep the model name as the dirname so the model name metadata is populated correctly
                local_dir = Path(tmpdir)/model_name
                print(local_dir)
                api.snapshot_download(repo_id=model_id, local_dir=local_dir, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
                print("Model downloaded successfully!")
                print(f"Current working directory: {os.getcwd()}")
                print(f"Model directory contents: {os.listdir(local_dir)}")

                # LoRA adapters (adapter_config.json without config.json) are
                # not convertible here; point the user to GGUF-my-lora.
                config_dir = local_dir/"config.json"
                adapter_config_dir = local_dir/"adapter_config.json"
                if os.path.exists(adapter_config_dir) and not os.path.exists(config_dir):
                    raise Exception('adapter_config.json is present.<br/><br/>If you are converting a LoRA adapter to GGUF, please use <a href="https://huggingface.co/spaces/ggml-org/gguf-my-lora" target="_blank" style="text-decoration:underline">GGUF-my-lora</a>.')

                # Convert the HF checkpoint to an fp16 GGUF file.
                result = subprocess.run([
                    "python", CONVERSION_SCRIPT, local_dir, "--outtype", "f16", "--outfile", fp16
                ], shell=False, capture_output=True)
                print(result)
                if result.returncode != 0:
                    stderr_str = result.stderr.decode("utf-8")
                    raise Exception(f"Error converting to fp16: {stderr_str}")
                print("Model converted to fp16 successfully!")
                print(f"Converted model path: {fp16}")
            # downloads tmpdir is deleted here; only the fp16 GGUF remains.

            imatrix_path = Path(outdir)/"imatrix.dat"

            if use_imatrix:
                if train_data_file:
                    train_data_path = train_data_file.name
                else:
                    train_data_path = "llama.cpp/groups_merged.txt" #fallback calibration dataset

                print(f"Training data file path: {train_data_path}")

                if not os.path.isfile(train_data_path):
                    raise Exception(f"Training data file not found: {train_data_path}")

                generate_importance_matrix(fp16, train_data_path, imatrix_path)
            else:
                print("Not using imatrix quantization.")

            # Quantize the model
            quantized_gguf_name = f"{model_name.lower()}-{imatrix_q_method.lower()}-imat.gguf" if use_imatrix else f"{model_name.lower()}-{q_method.lower()}.gguf"
            quantized_gguf_path = str(Path(outdir)/quantized_gguf_name)
            if use_imatrix:
                quantise_ggml = [
                    "./llama.cpp/llama-quantize",
                    "--imatrix", imatrix_path, fp16, quantized_gguf_path, imatrix_q_method
                ]
            else:
                quantise_ggml = [
                    "./llama.cpp/llama-quantize",
                    fp16, quantized_gguf_path, q_method
                ]
            result = subprocess.run(quantise_ggml, shell=False, capture_output=True)
            if result.returncode != 0:
                stderr_str = result.stderr.decode("utf-8")
                raise Exception(f"Error quantizing: {stderr_str}")
            print(f"Quantized successfully with {imatrix_q_method if use_imatrix else q_method} option!")
            print(f"Quantized model path: {quantized_gguf_path}")

            # Create empty repo
            # Target repo lives under the caller's namespace; exist_ok makes reruns idempotent.
            username = whoami(oauth_token.token)["name"]
            new_repo_url = api.create_repo(repo_id=f"{username}/{model_name}-{imatrix_q_method if use_imatrix else q_method}-GGUF", exist_ok=True, private=private_repo)
            new_repo_id = new_repo_url.repo_id
            print("Repo created successfully!", new_repo_url)

            # Reuse the source model card when available; start blank otherwise.
            # NOTE(review): bare except — consider narrowing to Exception.
            try:
                card = ModelCard.load(model_id, token=oauth_token.token)
            except:
                card = ModelCard("")
            if card.data.tags is None:
                card.data.tags = []
            card.data.tags.append("llama-cpp")
            card.data.tags.append("gguf-my-repo")
            card.data.base_model = model_id
            card.text = dedent(
                f"""
                # {new_repo_id}
                This model was converted to GGUF format from [`{model_id}`](https://huggingface.co/{model_id}) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
                Refer to the [original model card](https://huggingface.co/{model_id}) for more details on the model.

                ## Use with llama.cpp
                Install llama.cpp through brew (works on Mac and Linux)

                ```bash
                brew install llama.cpp

                ```
                Invoke the llama.cpp server or the CLI.

                ### CLI:
                ```bash
                llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "The meaning to life and the universe is"
                ```

                ### Server:
                ```bash
                llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048
                ```

                Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well.

                Step 1: Clone llama.cpp from GitHub.
                ```
                git clone https://github.com/ggerganov/llama.cpp
                ```

                Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux).
                ```
                cd llama.cpp && LLAMA_CURL=1 make
                ```

                Step 3: Run inference through the main binary.
                ```
                ./llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "The meaning to life and the universe is"
                ```
                or
                ```
                ./llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048
                ```
                """
            )
            readme_path = Path(outdir)/"README.md"
            card.save(readme_path)

            if split_model:
                split_upload_model(str(quantized_gguf_path), outdir, new_repo_id, oauth_token, split_max_tensors, split_max_size)
            else:
                try:
                    print(f"Uploading quantized model: {quantized_gguf_path}")
                    api.upload_file(
                        path_or_fileobj=quantized_gguf_path,
                        path_in_repo=quantized_gguf_name,
                        repo_id=new_repo_id,
                    )
                except Exception as e:
                    raise Exception(f"Error uploading quantized model: {e}")

            # Upload the imatrix alongside the quants when one was generated.
            if os.path.isfile(imatrix_path):
                try:
                    print(f"Uploading imatrix.dat: {imatrix_path}")
                    api.upload_file(
                        path_or_fileobj=imatrix_path,
                        path_in_repo="imatrix.dat",
                        repo_id=new_repo_id,
                    )
                except Exception as e:
                    raise Exception(f"Error uploading imatrix.dat: {e}")

            api.upload_file(
                path_or_fileobj=readme_path,
                path_in_repo="README.md",
                repo_id=new_repo_id,
            )
            print(f"Uploaded successfully with {imatrix_q_method if use_imatrix else q_method} option!")

        # end of the TemporaryDirectory(dir="outputs") block; temporary outputs are deleted here

        return (
            f'<h1>✅ DONE</h1><br/>Find your repo here: <a href="{new_repo_url}" target="_blank" style="text-decoration:underline">{new_repo_id}</a>',
            "llama.png",
        )
    except Exception as e:
        # Any failure is rendered back into the UI instead of crashing the app.
        return (f'<h1>❌ ERROR</h1><br/><pre style="white-space:pre-wrap;">{escape(str(e))}</pre>', "error.png")
322
+
323
+
324
css="""/* Custom CSS to allow scrolling */
.gradio-container {overflow-y: auto;}
"""
# Create Gradio interface
with gr.Blocks(css=css) as demo:
    gr.Markdown("You must be logged in to use GGUF-my-repo.")
    gr.LoginButton(min_width=250)

    # Source model picker backed by Hub search.
    model_id = HuggingfaceHubSearch(
        label="Hub Model ID",
        placeholder="Search for model id on Huggingface",
        search_type="model",
    )

    # Plain quant types; shown only when imatrix quantization is OFF.
    q_method = gr.Dropdown(
        ["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"],
        label="Quantization Method",
        info="GGML quantization type",
        value="Q4_K_M",
        filterable=False,
        visible=True
    )

    # Imatrix quant types; shown only when imatrix quantization is ON.
    imatrix_q_method = gr.Dropdown(
        ["IQ3_M", "IQ3_XXS", "Q4_K_M", "Q4_K_S", "IQ4_NL", "IQ4_XS", "Q5_K_M", "Q5_K_S"],
        label="Imatrix Quantization Method",
        info="GGML imatrix quants type",
        value="IQ4_NL",
        filterable=False,
        visible=False
    )

    use_imatrix = gr.Checkbox(
        value=False,
        label="Use Imatrix Quantization",
        info="Use importance matrix for quantization."
    )

    private_repo = gr.Checkbox(
        value=False,
        label="Private Repo",
        info="Create a private repo under your username."
    )

    # Optional calibration data; groups_merged.txt is used as fallback.
    train_data_file = gr.File(
        label="Training Data File",
        file_types=["txt"],
        visible=False
    )

    split_model = gr.Checkbox(
        value=False,
        label="Split Model",
        info="Shard the model using gguf-split."
    )

    split_max_tensors = gr.Number(
        value=256,
        label="Max Tensors per File",
        info="Maximum number of tensors per file when splitting model.",
        visible=False
    )

    split_max_size = gr.Textbox(
        label="Max File Size",
        info="Maximum file size when splitting model (--split-max-size). May leave empty to use the default. Accepted suffixes: M, G. Example: 256M, 5G",
        visible=False
    )

    # Toggle between the plain and imatrix quantization controls.
    def update_visibility(use_imatrix):
        return gr.update(visible=not use_imatrix), gr.update(visible=use_imatrix), gr.update(visible=use_imatrix)

    use_imatrix.change(
        fn=update_visibility,
        inputs=use_imatrix,
        outputs=[q_method, imatrix_q_method, train_data_file]
    )

    # Main entry point: wires all inputs into process_model.
    iface = gr.Interface(
        fn=process_model,
        inputs=[
            model_id,
            q_method,
            use_imatrix,
            imatrix_q_method,
            private_repo,
            train_data_file,
            split_model,
            split_max_tensors,
            split_max_size,
        ],
        outputs=[
            gr.Markdown(label="output"),
            gr.Image(show_label=False),
        ],
        title="Create your own GGUF Quants, blazingly fast ⚡!",
        description="The space takes an HF repo as an input, quantizes it and creates a Public repo containing the selected quant under your HF user namespace.",
        api_name=False
    )

    # Show the sharding controls only when splitting is enabled.
    def update_split_visibility(split_model):
        return gr.update(visible=split_model), gr.update(visible=split_model)

    split_model.change(
        fn=update_split_visibility,
        inputs=split_model,
        outputs=[split_max_tensors, split_max_size]
    )

# Periodic factory reboot of the Space to clear accumulated state/disk usage.
def restart_space():
    HfApi().restart_space(repo_id="ggml-org/gguf-my-repo", token=HF_TOKEN, factory_reboot=True)

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=21600)  # every 6 hours
scheduler.start()

# Launch the interface
# Single-worker queue: conversions are heavy, so jobs run one at a time.
demo.queue(default_concurrency_limit=1, max_size=5).launch(debug=True, show_api=False)
docker-compose.yml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Docker compose file for LOCAL development

services:
  gguf-my-repo:
    build:
      context: .
      dockerfile: Dockerfile
    image: gguf-my-repo
    container_name: gguf-my-repo
    ports:
      - "7860:7860"
    volumes:
      # Mount the working tree so local edits are visible inside the container.
      - .:/home/user/app
    environment:
      # RUN_LOCALLY=1 makes start.sh build llama.cpp without CUDA.
      - RUN_LOCALLY=1
      - HF_TOKEN=${HF_TOKEN}
error.png ADDED

Git LFS Details

  • SHA256: de04fcbc70f41e4735ab169480b74eb4e90d76f50d6977a19d04e444cdb0937e
  • Pointer size: 131 Bytes
  • Size of remote file: 740 kB
groups_merged.txt ADDED
The diff for this file is too large to render. See raw diff
 
start.sh ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Build the llama.cpp helper binaries needed by app.py, then start the app.

if [ ! -d "llama.cpp" ]; then
    # only run in dev env
    git clone https://github.com/ggerganov/llama.cpp
fi

export GGML_CUDA=OFF
if [[ -z "${RUN_LOCALLY}" ]]; then
    # enable CUDA if NOT running locally
    export GGML_CUDA=ON
fi

# Static build of just the three tools the app shells out to.
cd llama.cpp
cmake -B build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=${GGML_CUDA}
cmake --build build --config Release -j --target llama-quantize llama-gguf-split llama-imatrix
# Keep only the binaries; drop the build tree to save disk space.
cp ./build/bin/llama-* .
rm -rf build

cd ..
python app.py
+ python app.py