# Hjgugugjhuhjggg's picture
# Update app.py
# f8a2d64 verified
import os
import tempfile
from textwrap import dedent
from dataclasses import dataclass
import logging
import gradio as gr
from gradio_huggingfacehub_search import HuggingfaceHubSearch
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi, whoami, ModelCard, scan_cache_dir
from huggingface_hub.utils import RepositoryNotFoundError
import mlx_lm
import mlx_vlm
import mlx.core as mx
from safetensors import safe_open
# The HF token from the environment is used for the scheduled Space restart
# below (if configured); per-user tokens come from the Gradio login button.
HF_TOKEN = os.environ.get("HF_TOKEN")
# Keep the Hub download cache local to the Space so clear_hf_cache_space()
# can wipe it after every conversion.
os.environ["HF_HUB_CACHE"] = "cache"
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
# Maps the UI quantization choice to the bit width passed to mlx_lm/mlx_vlm.
QUANT_PARAMS = {"Q2": 2, "Q3": 3, "Q4": 4, "Q6": 6, "Q8": 8}
def list_files_in_folder(folder_path):
    """Return the names of the regular files directly inside *folder_path*.

    Subdirectories are skipped; only top-level plain files are listed.
    """
    file_names = []
    for entry in os.listdir(folder_path):
        if os.path.isfile(os.path.join(folder_path, entry)):
            file_names.append(entry)
    return file_names
def clear_hf_cache_space():
    """Delete every cached *model* revision from the local HF Hub cache.

    Dataset/space cache entries are left untouched.
    """
    cache_info = scan_cache_dir()
    revision_hashes = [
        rev.commit_hash
        for repo in cache_info.repos
        if repo.repo_type == "model"
        for rev in repo.revisions
    ]
    cache_info.delete_revisions(*revision_hashes).execute()
    print("Cache has been cleared")
def upload_to_hub(path, upload_repo, hf_path, oauth_token):
    """Create *upload_repo* on the Hub and upload every file found in *path*.

    A model card derived from the source repo *hf_path* is written to
    ``path/README.md`` first so it is uploaded with the weights.  Individual
    file-upload failures are logged and skipped (best-effort); a failure to
    create the repo is re-raised.

    Args:
        path: local directory containing the converted MLX model files.
        upload_repo: target repo id, e.g. ``user/model-mlx-4Bit``.
        hf_path: source Hub model id the conversion started from.
        oauth_token: Gradio OAuth token object; ``.token`` is the HF token.
    """
    card = ModelCard.load(hf_path, token=oauth_token.token)
    # Bug fix: the original added only ["mlx"] when the card had no tags but
    # ["mlx", "mlx-my-repo"] when it did — both tags are now always added.
    card.data.tags = (card.data.tags or []) + ["mlx", "mlx-my-repo"]
    card.data.base_model = hf_path
    card.text = dedent(f"""
# {upload_repo}
The Model [{upload_repo}](https://huggingface.co/{upload_repo}) was converted to MLX format from [{hf_path}](https://huggingface.co/{hf_path}) using mlx-lm version **{mlx_lm.__version__}**.
## Use with mlx
```bash
pip install mlx-lm
```
```python
from mlx_lm import load, generate
model, tokenizer = load("{upload_repo}")
prompt = "hello"
if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
    messages = [{{"role": "user", "content": prompt}}]
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
response = generate(model, tokenizer, prompt=prompt, verbose=True)
```
""")
    card.save(os.path.join(path, "README.md"))
    logging.getLogger().setLevel(logging.INFO)
    api = HfApi(token=oauth_token.token)
    try:
        # exist_ok=True makes re-runs against the same repo idempotent.
        api.create_repo(repo_id=upload_repo, exist_ok=True)
    except Exception as e:
        print(f"Error creating repository: {e}")
        raise
    files = list_files_in_folder(path)
    print(files)
    for file in files:
        file_path = os.path.join(path, file)
        print(f"Uploading file: {file_path}")
        try:
            api.upload_file(path_or_fileobj=file_path, path_in_repo=file, repo_id=upload_repo)
        except Exception as e:
            # Best-effort upload: one bad file should not abort the rest.
            print(f"Error uploading {file}: {e}")
            print("Skipping this file and continuing...")
    print(f"Upload successful, go to https://huggingface.co/{upload_repo} for details.")
def map_tensor_name(key: str) -> str | None:
if not key.endswith(".weight"):
return None
return key
def modify_tensors(mlx_path: str) -> None:
    """Rewrite every ``.safetensors`` shard in *mlx_path* in place, keeping
    only the tensors whose key passes map_tensor_name().

    NOTE(review): map_tensor_name() returns None for any key that does not
    end in ".weight", so this silently drops biases and quantization
    scales/biases from the saved shards — confirm this is intentional; it
    looks like it would corrupt quantized models.

    Raises:
        FileNotFoundError: if no ``.safetensors`` files exist in *mlx_path*.
    """
    logging.info(f"Modifying tensors in: {mlx_path}")
    safetensors_files = [f for f in os.listdir(mlx_path) if f.endswith(".safetensors")]
    if not safetensors_files:
        raise FileNotFoundError(f"No .safetensors files in {mlx_path}")
    for weights_file in safetensors_files:
        weights_path = os.path.join(mlx_path, weights_file)
        original_state_dict = {}
        try:
            # Load tensors one at a time so a single bad tensor does not
            # abort the whole shard.
            with safe_open(weights_path, framework="mlx") as f:
                for key in f.keys():
                    try:
                        tensor = f.get_tensor(key)
                        original_state_dict[key] = tensor
                    except Exception as e:
                        logging.warning(f"Tensor load warning for '{key}' in {weights_file}: {e}")
                        continue
        except Exception as e:
            # Unreadable shard: log it and move on to the next file.
            logging.error(f"Safetensors error {weights_path}: {e}")
            continue
        modified_state_dict = {}
        for key, tensor in original_state_dict.items():
            new_key = map_tensor_name(key)
            if new_key is not None:
                modified_state_dict[new_key] = tensor
        try:
            # Overwrites the original shard file in place.
            mx.save_safetensors(weights_path, modified_state_dict)
            logging.info(f"Modified {weights_file} successfully.")
        except Exception as e:
            logging.error(f"Failed to save modified {weights_file}: {e}")
    logging.info("Tensor modification complete.")
def _convert_with_fallback(model_id, mlx_path, **convert_kwargs):
    """Convert *model_id* to MLX at *mlx_path*: try mlx_lm, then mlx_vlm.

    The original code retried mlx_lm a third time after both converters had
    failed — an identical call guaranteed to fail the same way.  Instead we
    re-raise the mlx_lm error with the mlx_vlm error chained as its cause.
    """
    try:
        mlx_lm.convert(model_id, mlx_path=mlx_path, **convert_kwargs)
    except Exception as lm_error:
        try:
            mlx_vlm.convert(model_id, mlx_path=mlx_path, **convert_kwargs)
        except Exception as vlm_error:
            raise lm_error from vlm_error


def process_model(model_id, q_method, oauth_token: gr.OAuthToken | None):
    """Convert a Hub model to MLX (FP16 or quantized) and upload the result.

    Args:
        model_id: source Hub model id, e.g. ``org/model``.
        q_method: "FP16" or one of the QUANT_PARAMS keys ("Q2".."Q8").
        oauth_token: Gradio OAuth token of the logged-in user, or None.

    Returns:
        A (markdown_message, image_path) tuple consumed by the Gradio UI.
    """
    if oauth_token is None or oauth_token.token is None:
        return "You must be logged in to use MLX-my-repo", "error.png"
    # Propagate the logged-in user's token so gated model downloads succeed.
    os.environ["HF_TOKEN"] = oauth_token.token
    model_name = model_id.split("/")[-1]
    try:
        username = whoami(oauth_token.token)["name"]
    except Exception as e:
        return f"Error retrieving user info: {e}", "error.png"
    try:
        # Robustness fix: TemporaryDirectory(dir=...) raises FileNotFoundError
        # if the parent directory does not already exist.
        os.makedirs("converted", exist_ok=True)
        with tempfile.TemporaryDirectory(dir="converted") as tmpdir:
            mlx_path = os.path.join(tmpdir, "mlx")
            if q_method == "FP16":
                upload_repo = f"{username}/{model_name}-mlx-fp16"
                _convert_with_fallback(model_id, mlx_path, quantize=False, dtype="float16")
            else:
                q_bits = QUANT_PARAMS[q_method]
                upload_repo = f"{username}/{model_name}-mlx-{q_bits}Bit"
                _convert_with_fallback(model_id, mlx_path, quantize=True, q_bits=q_bits)
            try:
                modify_tensors(mlx_path)
            except FileNotFoundError as e:
                return f"Error modifying tensors: {e}", "error.png"
            except Exception as e:
                return f"Error during tensor modification: {e}", "error.png"
            upload_to_hub(path=mlx_path, upload_repo=upload_repo, hf_path=model_id, oauth_token=oauth_token)
            return (f'Find your repo <a href="https://huggingface.co/{upload_repo}" target="_blank" style="text-decoration:underline">here</a>', "llama.png")
    except Exception as e:
        return f"Error: {e}", "error.png"
    finally:
        # Always reclaim disk space on the Space, success or failure.
        clear_hf_cache_space()
        print("Folder cleaned up successfully!")
# Keep the page scrollable even when the output grows.
css = """.gradio-container { overflow-y: auto; }"""
with gr.Blocks(css=css) as demo:
    gr.Markdown("You must be logged in to use MLX-my-repo.")
    # The LoginButton supplies the per-user OAuth token, which is later used
    # to download gated models and create the target repo.
    gr.LoginButton(min_width=250)
    model_id = HuggingfaceHubSearch(label="Hub Model ID", placeholder="Search for model id on Huggingface", search_type="model")
    q_method = gr.Dropdown(["FP16", "Q2", "Q3", "Q4", "Q6", "Q8"], label="Conversion Method", info="MLX conversion type (FP16 for float16, Q2–Q8 for quantized models)", value="Q4", filterable=False, visible=True)
    # gr.Interface wires model_id + q_method into process_model and renders
    # its (markdown, image) return inside the Blocks layout.
    iface = gr.Interface(fn=process_model, inputs=[model_id, q_method], outputs=[gr.Markdown(label="output"), gr.Image(show_label=False)], title="Create your own MLX Models, blazingly fast ⚡!", description="The space takes an HF repo as an input, converts it to MLX format (FP16 or quantized), and creates a Public/Private repo under your HF user namespace.", api_name=False)
def restart_space():
    """Factory-reboot this Space via the Hub API.

    Called periodically by the background scheduler; failures are reported
    to stdout and never raised, so the scheduler keeps running.
    """
    try:
        api = HfApi()
        api.restart_space(
            repo_id="Hjgugugjhuhjggg/mlx-my-repohxhxhhd",
            token=HF_TOKEN,
            factory_reboot=True,
        )
    except RepositoryNotFoundError:
        print("The space 'Hjgugugjhuhjggg/mlx-my-repohxhxhhd' not found. Please check the repo ID and your token.")
    except Exception as e:
        print(f"An error occurred when restarting space: {e}")
# Reboot the Space every 6 hours (21600 s) to clear any leaked state.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=21600)
scheduler.start()
# Single-worker queue: conversions are memory/disk heavy, so run one at a
# time and let at most 5 requests wait.
demo.queue(default_concurrency_limit=1, max_size=5).launch(debug=True, show_api=False)