Update README.md
Browse files
README.md
CHANGED
@@ -27,6 +27,7 @@ This model is a fine-tuned version of [unsloth/Phi-3-mini-4k-instruct-bnb-4bit](
 
 ## 🧠 How to Use
 
+```python
 from unsloth import FastLanguageModel
 from transformers import AutoTokenizer
 import torch
@@ -34,7 +35,7 @@ import gradio as gr
 
 # 🔃 Load model and tokenizer from Hugging Face
 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name="sreebhargav/finetuned-phi3-cli", #
+    model_name="sreebhargav/finetuned-phi3-cli", # Your HF model path
     max_seq_length=2048,
     load_in_4bit=True,
     device_map="auto"
@@ -70,4 +71,4 @@ gr.Interface(
     outputs=gr.Textbox(label="🧠 AI Response"),
     title="🧠 CLI Assistant - Phi-3 Mini + Unsloth",
     description="Ask your command-line questions. This model was fine-tuned with QLoRA using Unsloth."
-).launch(share=True)
+).launch(share=True)