Steelskull
committed on
Update README.md
Browse files
README.md
CHANGED
@@ -125,8 +125,8 @@ MS = Model Stock
|
|
125 |
<p>Use qwen format</p>
|
126 |
<h2>Quants: (List of badasses)</h2>
|
127 |
<!--<p>GGUF Quant: </p>
|
128 |
-
<p> - bartowski: <a href="https://huggingface.co/bartowski/Q2.5-MS-Mistoria-72b-GGUF" target="_blank"> Combined-GGUF </a></p>
|
129 |
-
<p> - mradermacher: <a href="https://huggingface.co/mradermacher/Q2.5-MS-Mistoria-72b-GGUF" target="_blank"> GGUF </a>// <a href="https://huggingface.co/mradermacher/Q2.5-MS-Mistoria-72b-i1-GGUF" target="_blank"> Imat-GGUF </a></p>
|
130 |
<h3>Config:</h3>
|
131 |
<pre><code>MODEL_NAME = "Q2.5-MS-Mistoria-72b"
|
132 |
base_model: zetasepic/Qwen2.5-72B-Instruct-abliterated-v2
|
|
|
125 |
<p>Use qwen format</p>
|
126 |
<h2>Quants: (List of badasses)</h2>
|
127 |
<!--<p>GGUF Quant: </p>
|
128 |
+
<p> - bartowski: <a href="https://huggingface.co/bartowski/Q2.5-MS-Mistoria-72b-GGUF" target="_blank"> Combined-GGUF </a></p> -->
|
129 |
+
<p> - mradermacher: <a href="https://huggingface.co/mradermacher/Q2.5-MS-Mistoria-72b-GGUF" target="_blank"> GGUF </a>// <a href="https://huggingface.co/mradermacher/Q2.5-MS-Mistoria-72b-i1-GGUF" target="_blank"> Imat-GGUF </a></p>
|
130 |
<h3>Config:</h3>
|
131 |
<pre><code>MODEL_NAME = "Q2.5-MS-Mistoria-72b"
|
132 |
base_model: zetasepic/Qwen2.5-72B-Instruct-abliterated-v2
|