add AIBOM #7
opened by fatima113
Qwen_Qwen2.5-3B-Instruct.json
ADDED
@@ -0,0 +1,68 @@
+{
+  "bomFormat": "CycloneDX",
+  "specVersion": "1.6",
+  "serialNumber": "urn:uuid:076b0dc2-9333-4f26-860f-8b81465eaffb",
+  "version": 1,
+  "metadata": {
+    "timestamp": "2025-06-05T09:35:56.228809+00:00",
+    "component": {
+      "type": "machine-learning-model",
+      "bom-ref": "Qwen/Qwen2.5-3B-Instruct-9e7eb871-9d9f-58d2-9da6-2f0ef8de7526",
+      "name": "Qwen/Qwen2.5-3B-Instruct",
+      "externalReferences": [
+        {
+          "url": "https://huggingface.co/Qwen/Qwen2.5-3B-Instruct",
+          "type": "documentation"
+        }
+      ],
+      "modelCard": {
+        "modelParameters": {
+          "task": "text-generation",
+          "architectureFamily": "qwen2",
+          "modelArchitecture": "Qwen2ForCausalLM"
+        },
+        "properties": [
+          {
+            "name": "library_name",
+            "value": "transformers"
+          },
+          {
+            "name": "base_model",
+            "value": "Qwen/Qwen2.5-3B"
+          }
+        ]
+      },
+      "authors": [
+        {
+          "name": "Qwen"
+        }
+      ],
+      "licenses": [
+        {
+          "license": {
+            "name": "qwen-research",
+            "url": "https://huggingface.co/Qwen/Qwen2.5-3B-Instruct/blob/main/LICENSE"
+          }
+        }
+      ],
+      "description": "Qwen2.5 is the latest series of Qwen large language models. For Qwen2.5, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters. Qwen2.5 brings the following improvements upon Qwen2:- Significantly **more knowledge** and has greatly improved capabilities in **coding** and **mathematics**, thanks to our specialized expert models in these domains.- Significant improvements in **instruction following**, **generating long texts** (over 8K tokens), **understanding structured data** (e.g, tables), and **generating structured outputs** especially JSON. **More resilient to the diversity of system prompts**, enhancing role-play implementation and condition-setting for chatbots.- **Long-context Support** up to 128K tokens and can generate up to 8K tokens.- **Multilingual support** for over 29 languages, including Chinese, English, French, Spanish, Portuguese, German, Italian, Russian, Japanese, Korean, Vietnamese, Thai, Arabic, and more.**This repo contains the instruction-tuned 3B Qwen2.5 model**, which has the following features:- Type: Causal Language Models- Training Stage: Pretraining & Post-training- Architecture: transformers with RoPE, SwiGLU, RMSNorm, Attention QKV bias and tied word embeddings- Number of Parameters: 3.09B- Number of Paramaters (Non-Embedding): 2.77B- Number of Layers: 36- Number of Attention Heads (GQA): 16 for Q and 2 for KV- Context Length: Full 32,768 tokens and generation 8192 tokensFor more details, please refer to our [blog](https://qwenlm.github.io/blog/qwen2.5/), [GitHub](https://github.com/QwenLM/Qwen2.5), and [Documentation](https://qwen.readthedocs.io/en/latest/).",
+      "tags": [
+        "transformers",
+        "safetensors",
+        "qwen2",
+        "text-generation",
+        "chat",
+        "conversational",
+        "en",
+        "arxiv:2407.10671",
+        "base_model:Qwen/Qwen2.5-3B",
+        "base_model:finetune:Qwen/Qwen2.5-3B",
+        "license:other",
+        "autotrain_compatible",
+        "text-generation-inference",
+        "endpoints_compatible",
+        "region:us"
+      ]
+    }
+  }
+}
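Since the file added above is a CycloneDX 1.6 AIBOM, a quick way to sanity-check it before merging is to parse it and confirm the top-level fields the document declares. Below is a minimal sketch in Python using only the standard library; the local path `Qwen_Qwen2.5-3B-Instruct.json` is an assumption, and the checks mirror the fields visible in this diff rather than a full CycloneDX schema validation:

```python
import json
import uuid

# Load the AIBOM added in this PR (local path is an assumption).
with open("Qwen_Qwen2.5-3B-Instruct.json") as f:
    bom = json.load(f)

# Basic shape checks, mirroring the fields in the diff above.
assert bom["bomFormat"] == "CycloneDX"
assert bom["specVersion"] == "1.6"
assert bom["version"] == 1

# serialNumber should be a urn:uuid; uuid.UUID raises ValueError if malformed.
prefix, sep, raw = bom["serialNumber"].partition("urn:uuid:")
assert prefix == "" and sep and uuid.UUID(raw)

component = bom["metadata"]["component"]
assert component["type"] == "machine-learning-model"
print(f"AIBOM OK for {component['name']}")
```

For schema-level validation against the official CycloneDX 1.6 JSON schema, a dedicated validator would be needed; the sketch above only guards against the most common copy-paste and serialization mistakes.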