JayKimDevolved
committed on
JayKimDevolved/deepseek
Browse files
- .gitattributes +12 -0
- README.md +2 -2
- adapter_config.json +2 -2
- adapter_model.safetensors +1 -1
- ollama1570737069/ollama.pid +1 -0
- ollama1570737069/runners/cpu/ollama_llama_server +3 -0
- ollama1570737069/runners/cpu_avx/ollama_llama_server +3 -0
- ollama1570737069/runners/cpu_avx2/ollama_llama_server +3 -0
- ollama1570737069/runners/cuda_v11/ollama_llama_server +3 -0
- ollama1570737069/runners/cuda_v12/ollama_llama_server +3 -0
- ollama1570737069/runners/rocm/ollama_llama_server +3 -0
- ollama1718139592/ollama.pid +1 -0
- ollama1718139592/runners/cpu/ollama_llama_server +3 -0
- ollama1718139592/runners/cpu_avx/ollama_llama_server +3 -0
- ollama1718139592/runners/cpu_avx2/ollama_llama_server +3 -0
- ollama1718139592/runners/cuda_v11/ollama_llama_server +3 -0
- ollama1718139592/runners/cuda_v12/ollama_llama_server +3 -0
- ollama1718139592/runners/rocm/ollama_llama_server +3 -0
- tmpkds2w0wa/__pycache__/_remote_module_non_scriptable.cpython-310.pyc +0 -0
- tmpkds2w0wa/_remote_module_non_scriptable.py +81 -0
- training_args.bin +1 -1
.gitattributes
CHANGED
@@ -36,3 +36,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 pip-install-ghxuqwgs/numpy_78e94bf2b6094bf9a1f3d92042f9bf46/build/temp.linux-x86_64-cpython-310/build/src.linux-x86_64-3.1/numpy/core/src/multiarray/arraytypes.o filter=lfs diff=lfs merge=lfs -text
 pip-install-ghxuqwgs/numpy_78e94bf2b6094bf9a1f3d92042f9bf46/build/temp.linux-x86_64-cpython-310/libnpysort.a filter=lfs diff=lfs merge=lfs -text
 tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ollama1570737069/runners/cpu/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1570737069/runners/cpu_avx/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1570737069/runners/cpu_avx2/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1570737069/runners/cuda_v11/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1570737069/runners/cuda_v12/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1570737069/runners/rocm/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1718139592/runners/cpu/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1718139592/runners/cpu_avx/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1718139592/runners/cpu_avx2/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1718139592/runners/cuda_v11/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1718139592/runners/cuda_v12/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
+ollama1718139592/runners/rocm/ollama_llama_server filter=lfs diff=lfs merge=lfs -text
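Note: the twelve added lines route the bundled ollama_llama_server binaries through Git LFS. A minimal sketch (not part of this commit) of how such path rules can be checked programmatically; the glob patterns below are an illustrative simplification of the literal paths above, and real .gitattributes matching has more rules than fnmatch covers.

# Sketch: decide whether a path would be routed through LFS by the rules above.
# The patterns are assumptions that summarize the literal entries in the diff.
from fnmatch import fnmatch

LFS_PATTERNS = [
    "tokenizer.json",
    "ollama1570737069/runners/*/ollama_llama_server",
    "ollama1718139592/runners/*/ollama_llama_server",
]

def is_lfs_tracked(path: str) -> bool:
    return any(fnmatch(path, pat) for pat in LFS_PATTERNS)

print(is_lfs_tracked("ollama1570737069/runners/rocm/ollama_llama_server"))  # True
print(is_lfs_tracked("README.md"))  # False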
README.md
CHANGED
@@ -35,8 +35,8 @@ This model was trained with SFT.
 ### Framework versions

 - TRL: 0.13.0
-- Transformers: 4.
-- Pytorch: 2.1
+- Transformers: 4.48.1
+- Pytorch: 2.5.1
 - Datasets: 3.2.0
 - Tokenizers: 0.21.0
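A minimal sketch (not part of this commit) for checking that a local environment matches the versions pinned above; the expected values are taken directly from the new side of the diff.

# Sketch: compare installed package versions against the README's pins.
from importlib.metadata import version, PackageNotFoundError

EXPECTED = {
    "trl": "0.13.0",
    "transformers": "4.48.1",
    "torch": "2.5.1",
    "datasets": "3.2.0",
    "tokenizers": "0.21.0",
}

for pkg, want in EXPECTED.items():
    try:
        have = version(pkg)
    except PackageNotFoundError:
        have = "<not installed>"
    status = "ok" if have == want else "MISMATCH"
    print(f"{pkg}: expected {want}, found {have} [{status}]")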
adapter_config.json
CHANGED
@@ -24,9 +24,9 @@
   "revision": null,
   "target_modules": [
     "v_proj",
-    "q_proj",
-    "k_proj",
     "gate_proj",
+    "k_proj",
+    "q_proj",
     "o_proj"
   ],
   "task_type": "CAUSAL_LM",
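The change above only reorders target_modules; the set {v_proj, gate_proj, k_proj, q_proj, o_proj} is unchanged. A minimal sketch (not part of this commit) of the equivalent PEFT LoraConfig, assuming a recent peft release, which normalizes the list to a set so the ordering is cosmetic.

# Sketch: the reordered target_modules expressed as a LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    target_modules=["v_proj", "gate_proj", "k_proj", "q_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
# Recent peft versions store this as a set of module-name suffixes,
# so list order does not affect which layers get adapters.
print(lora_config.target_modules)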
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:97974890d929435a7c68aed25e299075faa6d6922c790f3afded854f32dc7cc7
 size 369142184
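The adapter weights themselves live in Git LFS; only the pointer's oid changed here. An LFS pointer records the SHA-256 of the real object, so a downloaded copy can be verified against it. A minimal sketch (not part of this commit), with the expected hash taken from the new pointer above:

# Sketch: hash a downloaded file and compare it to the LFS pointer's oid.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "97974890d929435a7c68aed25e299075faa6d6922c790f3afded854f32dc7cc7"
actual = sha256_of("adapter_model.safetensors")
print("match" if actual == expected else f"mismatch: {actual}")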
ollama1570737069/ollama.pid
ADDED
@@ -0,0 +1 @@
+6455
ollama1570737069/runners/cpu/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:200b9fd1f8d2b5727d7b28c322c21c200d5bc977f73fff47dbc32fb71f398f7a
+size 8919824
ollama1570737069/runners/cpu_avx/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76ded4958ec90932962962382565a37252d79b5bfde63fea6bf16b7b7f7fd371
+size 8985392
ollama1570737069/runners/cpu_avx2/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3123dc06e08a26b90af4694d0c513b1795c3dc252d1c76586317cbfefaf777cd
+size 8997680
ollama1570737069/runners/cuda_v11/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b74174d557c6fd9de9af3ab2adb54ae81d4b584af38a85d0a8a25c57f69ec494
+size 8997776
ollama1570737069/runners/cuda_v12/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6940d1e48c100d3d8806c8c4f6df84f83920de9ce89d493c33405e242e9ef43
+size 8960912
ollama1570737069/runners/rocm/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90fa16551538f9ac96d06ab8b2e180e11b5d23ffb6db36437903c5570c827239
+size 8952720
ollama1718139592/ollama.pid
ADDED
@@ -0,0 +1 @@
+6102
ollama1718139592/runners/cpu/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:200b9fd1f8d2b5727d7b28c322c21c200d5bc977f73fff47dbc32fb71f398f7a
+size 8919824
ollama1718139592/runners/cpu_avx/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76ded4958ec90932962962382565a37252d79b5bfde63fea6bf16b7b7f7fd371
+size 8985392
ollama1718139592/runners/cpu_avx2/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3123dc06e08a26b90af4694d0c513b1795c3dc252d1c76586317cbfefaf777cd
+size 8997680
ollama1718139592/runners/cuda_v11/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b74174d557c6fd9de9af3ab2adb54ae81d4b584af38a85d0a8a25c57f69ec494
+size 8997776
ollama1718139592/runners/cuda_v12/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6940d1e48c100d3d8806c8c4f6df84f83920de9ce89d493c33405e242e9ef43
+size 8960912
ollama1718139592/runners/rocm/ollama_llama_server
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90fa16551538f9ac96d06ab8b2e180e11b5d23ffb6db36437903c5570c827239
+size 8952720
tmpkds2w0wa/__pycache__/_remote_module_non_scriptable.cpython-310.pyc
ADDED
Binary file (1.5 kB)
tmpkds2w0wa/_remote_module_non_scriptable.py
ADDED
@@ -0,0 +1,81 @@
+from typing import *
+
+import torch
+import torch.distributed.rpc as rpc
+from torch import Tensor
+from torch._jit_internal import Future
+from torch.distributed.rpc import RRef
+from typing import Tuple  # pyre-ignore: unused import
+
+
+module_interface_cls = None
+
+
+def forward_async(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    return rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+
+
+def forward(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    ret_fut = rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+    return ret_fut.wait()
+
+
+_generated_methods = [
+    forward_async,
+    forward,
+]
+
+
+
+
+def _remote_forward(
+    module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, *args, **kwargs):
+    module = module_rref.local_value()
+    device = torch.device(device)
+
+    if device.type != "cuda":
+        return module.forward(*args, **kwargs)
+
+    # If the module is on a cuda device,
+    # move any CPU tensor in args or kwargs to the same cuda device.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``.
+    args = (*args,)
+    out_args: Tuple[()] = ()
+    for arg in args:
+        arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,)
+        out_args = out_args + arg
+
+    kwargs = {**kwargs}
+    for k, v in kwargs.items():
+        if isinstance(v, Tensor):
+            kwargs[k] = kwargs[k].to(device)
+
+    if is_device_map_set:
+        return module.forward(*out_args, **kwargs)
+
+    # If the device map is empty, then only CPU tensors are allowed to send over wire,
+    # so have to move any GPU tensor to CPU in the output.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, **kwargs))``.
+    ret: Tuple[()] = ()
+    for i in module.forward(*out_args, **kwargs):
+        i = (i.cpu(),) if isinstance(i, Tensor) else (i,)
+        ret = ret + i
+    return ret
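This file matches the template that PyTorch's RemoteModule (torch.distributed.nn.api.remote_module) generates at runtime; the tmpkds2w0wa/ path suggests it was committed from a temporary working directory rather than written by hand. A minimal sketch (not part of this commit) of the RemoteModule API that instantiates such a template; the worker names and Linear dimensions are illustrative assumptions, and it needs a second RPC worker process to actually run.

# Sketch: RemoteModule usage that exercises the generated forward/forward_async.
import torch
import torch.distributed.rpc as rpc
from torch import nn
from torch.distributed.nn import RemoteModule

# Assumes a two-process RPC group; the peer must call init_rpc as "worker1".
rpc.init_rpc("worker0", rank=0, world_size=2)

# Instantiates nn.Linear(20, 30) on worker1; its forward/forward_async methods
# come from the same template as the file added above.
remote_linear = RemoteModule("worker1/cpu", nn.Linear, args=(20, 30))
fut = remote_linear.forward_async(torch.randn(128, 20))
print(fut.wait().shape)  # torch.Size([128, 30])

rpc.shutdown()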
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e9f019f87aed1c047e337b35c7a59f80057ec714320781f3babafafff0cea488
 size 5560