Commit cbf672a
Parent(s): 2468d85

Fix Windows Defender problem

Had to remove the multiple-downloads script, but it works now.
README.md
CHANGED
@@ -18,7 +18,7 @@ Feel free to send in PRs or use this code however you'd like.\
 
 - [EXL2 Single Quant V3](https://colab.research.google.com/drive/1Vc7d6JU3Z35OVHmtuMuhT830THJnzNfS?usp=sharing) **(COLAB)**
 
-- [EXL2 Local Quant Windows](https://huggingface.co/Anthonyg5005/hf-scripts/resolve/main/exl2-windows-local/exl2-windows-local.zip?download=true)
+- [EXL2 Local Quant Windows](https://huggingface.co/Anthonyg5005/hf-scripts/resolve/main/exl2-windows-local/exl2-windows-local.zip?download=true)
 
 - [Upload folder to repo](https://huggingface.co/Anthonyg5005/hf-scripts/blob/main/upload%20folder%20to%20repo.py)
 
exl2-windows-local/convert-model-auto.bat
CHANGED
@@ -1,3 +1,5 @@
+@echo off
+
 set /p "model=Folder name: "
 set /p "bpw=Target BPW: "
 mkdir %model%-exl2-%bpw%bpw
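The hunk above only shows the head of the wrapper (prompting for the folder name and target BPW, then creating the output folder). If the script goes on to invoke exllamav2's converter, a call along the lines below would be consistent with the values it collects; this is an assumption for orientation, not part of the shown hunk, and the convert.py flag names are taken from exllamav2 rather than this diff.

rem Hypothetical continuation (assumption; not visible in this hunk):
rem feed the prompted folder name and BPW into exllamav2's convert.py,
rem using the freshly created folder as the output directory.
venv\scripts\python.exe convert.py -i %model% -o %model%-exl2-%bpw%bpw -b %bpw%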
exl2-windows-local/download multiple models.ps1
DELETED
@@ -1,28 +0,0 @@
-# Prompt user for the number of models to download
-$numberOfModels = Read-Host "Enter the number of models to download"
-
-# Initialize an array to store model repos
-$modelRepos = @()
-
-# Loop to collect model repos
-for ($i = 1; $i -le $numberOfModels; $i++) {
-    $modelRepo = Read-Host "Enter Model Repo $i"
-    $modelRepos += $modelRepo
-}
-
-# Function to download a model in a new PowerShell window
-function Get-Model {
-    param (
-        [string]$modelRepo
-    )
-
-    # Start a new PowerShell window and execute the download-model.py script
-    Start-Process powershell -ArgumentList "-NoProfile -ExecutionPolicy Bypass -Command .\venv\Scripts\activate.ps1; python.exe download-model.py $modelRepo" -NoNewWindow
-}
-
-# Loop through each model repo and download in a new PowerShell window
-foreach ($repo in $modelRepos) {
-    Get-Model -modelRepo $repo
-}
-
-Write-Host "Downloads initiated for $numberOfModels models. Check the progress in the new PowerShell windows."
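Per the commit message, this multi-download helper is the part that had to be removed to fix the Windows Defender problem; it launched a separate PowerShell process per model. If sequential multi-model downloads are still wanted, a batch loop like the sketch below could stand in, running everything in the current window. This is an illustration only, not part of this commit, and the prompt wording is invented.

@echo off
rem Illustrative replacement (not in this commit): download several repos
rem one after another in the current window instead of spawning a
rem PowerShell process per model.
set /p "repos=Space-separated model repos: "
for %%r in (%repos%) do (
    echo Downloading %%r
    venv\scripts\python.exe download-model.py %%r
)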
exl2-windows-local/download-model.bat
ADDED
@@ -0,0 +1,5 @@
+@echo off
+
+echo Enter the model repo. User/Repo:Branch (Branch optional)
+set /p "repo=Model repo: "
+venv\scripts\python.exe download-model.py %repo%
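Since the new batch file simply forwards the prompted value to download-model.py, the same download can also be run without the prompt by passing the repo directly; the repo name below is a placeholder, and the optional branch uses the User/Repo:Branch form mentioned in the prompt.

rem Non-interactive equivalent of download-model.bat (placeholder repo name):
venv\scripts\python.exe download-model.py someuser/some-model:main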
exl2-windows-local/exl2-windows-local.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d1d305f32329e1c812ff858f6ca564c008ce02c0f1837c5f3560c62c7293d945
+size 5918
exl2-windows-local/instructions.txt
CHANGED
@@ -9,5 +9,5 @@ https://developer.nvidia.com/cuda-11-8-0-download-archive?target_os=Windows&targ
 Haven't done much testing but Visual Studio with desktop development for C++ might be required. I've gotten cl.exe errors on a previous install
 
 make sure you setup the environment by using windows-setup.bat
-after everything is done just download a model using
+after everything is done just download a model using download-model.bat
 to quant, use convert-model-auto.bat. Enter the model's folder name, then the BPW for the model
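Putting the updated instructions together, the end-to-end flow on Windows looks roughly like the sketch below. The working-directory change and the example BPW are assumptions inferred from the setup script's move commands, not stated in this diff.

rem Illustrative end-to-end flow; example values are assumptions, not from this diff.
rem 1) one-time environment setup (moves the helper scripts and venv into exllamav2\)
call windows-setup.bat
rem 2) work from the exllamav2 folder, where the scripts now live
cd exllamav2
rem 3) fetch a model; the script prompts for User/Repo:Branch
call download-model.bat
rem 4) quantize; the script prompts for the downloaded folder name and a target BPW (e.g. 4.0)
call convert-model-auto.bat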
exl2-windows-local/windows-setup.bat
CHANGED
@@ -48,7 +48,7 @@ venv\scripts\python.exe -m pip install -r exllamav2/requirements.txt -q
 venv\scripts\python.exe -m pip install huggingface-hub -q
 venv\scripts\python.exe -m pip install .\exllamav2 -q
 
-move
+move download-model.bat exllamav2
 move convert-model-auto.bat exllamav2
 move download-model.py exllamav2
 move venv exllamav2