danhtran2mind committed on
Commit
8a17ca8
·
verified ·
1 Parent(s): ed21c4d

Update gradio_app/config.py

Browse files
Files changed (1) hide show
  1. gradio_app/config.py +74 -43
gradio_app/config.py CHANGED
@@ -1,44 +1,75 @@
1
- import logging
2
-
3
- # Configure logging
4
- logging.basicConfig(level=logging.INFO)
5
- logger = logging.getLogger(__name__)
6
-
7
- # LoRA configurations
8
- LORA_CONFIGS = {
9
- "Gemma-3-1B-Instruct-Vi-Medical-LoRA": {
10
- "base_model": "unsloth/gemma-3-1b-it",
11
- "lora_adapter": "danhtran2mind/Gemma-3-1B-Instruct-Vi-Medical-LoRA"
12
- },
13
- "Gemma-3-1B-GRPO-Vi-Medical-LoRA": {
14
- "base_model": "unsloth/gemma-3-1b-it",
15
- "lora_adapter": "danhtran2mind/Gemma-3-1B-GRPO-Vi-Medical-LoRA"
16
- },
17
- "Llama-3.2-3B-Instruct-Vi-Medical-LoRA": {
18
- "base_model": "unsloth/Llama-3.2-3B-Instruct",
19
- "lora_adapter": "danhtran2mind/Llama-3.2-3B-Instruct-Vi-Medical-LoRA"
20
- },
21
- "Llama-3.2-1B-Instruct-Vi-Medical-LoRA": {
22
- "base_model": "unsloth/Llama-3.2-1B-Instruct",
23
- "lora_adapter": "danhtran2mind/Llama-3.2-1B-Instruct-Vi-Medical-LoRA"
24
- },
25
- "Llama-3.2-3B-Reasoning-Vi-Medical-LoRA": {
26
- "base_model": "unsloth/Llama-3.2-3B-Instruct",
27
- "lora_adapter": "danhtran2mind/Llama-3.2-3B-Reasoning-Vi-Medical-LoRA"
28
- },
29
- "Qwen-3-0.6B-Instruct-Vi-Medical-LoRA": {
30
- "base_model": "Qwen/Qwen3-0.6B",
31
- "lora_adapter": "danhtran2mind/Qwen-3-0.6B-Instruct-Vi-Medical-LoRA"
32
- },
33
- "Qwen-3-0.6B-Reasoning-Vi-Medical-LoRA": {
34
- "base_model": "Qwen/Qwen3-0.6B",
35
- "lora_adapter": "danhtran2mind/Qwen-3-0.6B-Reasoning-Vi-Medical-LoRA"
36
- }
37
- }
38
-
39
- # Model settings
40
- MAX_INPUT_TOKEN_LENGTH = 4096
41
- DEFAULT_MAX_NEW_TOKENS = 512
42
- MAX_MAX_NEW_TOKENS = 2048
43
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  MODEL_IDS = list(LORA_CONFIGS.keys())
 
1
+ import logging
2
+
3
+ # Configure logging
4
+ logging.basicConfig(level=logging.INFO)
5
+ logger = logging.getLogger(__name__)
6
+
7
+ # LoRA configurations
8
+ # LORA_CONFIGS = {
9
+ # "Gemma-3-1B-Instruct-Vi-Medical-LoRA": {
10
+ # "base_model": "unsloth/gemma-3-1b-it",
11
+ # "lora_adapter": "danhtran2mind/Gemma-3-1B-Instruct-Vi-Medical-LoRA"
12
+ # },
13
+ # "Gemma-3-1B-GRPO-Vi-Medical-LoRA": {
14
+ # "base_model": "unsloth/gemma-3-1b-it",
15
+ # "lora_adapter": "danhtran2mind/Gemma-3-1B-GRPO-Vi-Medical-LoRA"
16
+ # },
17
+ # "Llama-3.2-3B-Instruct-Vi-Medical-LoRA": {
18
+ # "base_model": "unsloth/Llama-3.2-3B-Instruct",
19
+ # "lora_adapter": "danhtran2mind/Llama-3.2-3B-Instruct-Vi-Medical-LoRA"
20
+ # },
21
+ # "Llama-3.2-1B-Instruct-Vi-Medical-LoRA": {
22
+ # "base_model": "unsloth/Llama-3.2-1B-Instruct",
23
+ # "lora_adapter": "danhtran2mind/Llama-3.2-1B-Instruct-Vi-Medical-LoRA"
24
+ # },
25
+ # "Llama-3.2-3B-Reasoning-Vi-Medical-LoRA": {
26
+ # "base_model": "unsloth/Llama-3.2-3B-Instruct",
27
+ # "lora_adapter": "danhtran2mind/Llama-3.2-3B-Reasoning-Vi-Medical-LoRA"
28
+ # },
29
+ # "Qwen-3-0.6B-Instruct-Vi-Medical-LoRA": {
30
+ # "base_model": "Qwen/Qwen3-0.6B",
31
+ # "lora_adapter": "danhtran2mind/Qwen-3-0.6B-Instruct-Vi-Medical-LoRA"
32
+ # },
33
+ # "Qwen-3-0.6B-Reasoning-Vi-Medical-LoRA": {
34
+ # "base_model": "Qwen/Qwen3-0.6B",
35
+ # "lora_adapter": "danhtran2mind/Qwen-3-0.6B-Reasoning-Vi-Medical-LoRA"
36
+ # }
37
+ # }
38
+
39
+ LORA_CONFIGS = {
40
+ "Gemma-3-1B-Instruct-Vi-Medical-LoRA": {
41
+ "base_model": "google/gemma-3-1b-it",
42
+ "lora_adapter": "danhtran2mind/Gemma-3-1B-Instruct-Vi-Medical-LoRA"
43
+ },
44
+ "Gemma-3-1B-GRPO-Vi-Medical-LoRA": {
45
+ "base_model": "google/gemma-3-1b-it",
46
+ "lora_adapter": "danhtran2mind/Gemma-3-1B-GRPO-Vi-Medical-LoRA"
47
+ },
48
+ "Llama-3.2-3B-Instruct-Vi-Medical-LoRA": {
49
+ "base_model": "meta-llama/Llama-3.2-3B-Instruct",
50
+ "lora_adapter": "danhtran2mind/Llama-3.2-3B-Instruct-Vi-Medical-LoRA"
51
+ },
52
+ "Llama-3.2-1B-Instruct-Vi-Medical-LoRA": {
53
+ "base_model": "meta-llama/Llama-3.2-1B-Instruct",
54
+ "lora_adapter": "danhtran2mind/Llama-3.2-1B-Instruct-Vi-Medical-LoRA"
55
+ },
56
+ "Llama-3.2-3B-Reasoning-Vi-Medical-LoRA": {
57
+ "base_model": "meta-llama/Llama-3.2-3B-Instruct",
58
+ "lora_adapter": "danhtran2mind/Llama-3.2-3B-Reasoning-Vi-Medical-LoRA"
59
+ },
60
+ "Qwen-3-0.6B-Instruct-Vi-Medical-LoRA": {
61
+ "base_model": "Qwen/Qwen3-0.6B",
62
+ "lora_adapter": "danhtran2mind/Qwen-3-0.6B-Instruct-Vi-Medical-LoRA"
63
+ },
64
+ "Qwen-3-0.6B-Reasoning-Vi-Medical-LoRA": {
65
+ "base_model": "Qwen/Qwen3-0.6B",
66
+ "lora_adapter": "danhtran2mind/Qwen-3-0.6B-Reasoning-Vi-Medical-LoRA"
67
+ }
68
+ }
69
+
70
+ # Model settings
71
+ MAX_INPUT_TOKEN_LENGTH = 4096
72
+ DEFAULT_MAX_NEW_TOKENS = 512
73
+ MAX_MAX_NEW_TOKENS = 2048
74
+
75
  MODEL_IDS = list(LORA_CONFIGS.keys())