Commit be07910 · Parent(s): cb91aa8 · commit message: "qqqq"
minigpt4/configs/models/minigpt_v2.yaml
CHANGED
@@ -11,7 +11,7 @@ model:
   # generation configs
   prompt: ""
 
-  llama_model: /
+  llama_model: "ZebangCheng/Emotion-LLaMA"
   # llama_model: "/home/user/project/Emotion-LLaMA/checkpoints/Llama-2-7b-chat-hf"
   lora_r: 64
   lora_alpha: 16
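The practical effect of this one-line change: llama_model now names a Hugging Face Hub repo id rather than a local checkpoint directory, and from_pretrained() accepts either form, downloading the weights on first use. A minimal sketch of how such a config value can be read, assuming the YAML is loaded with OmegaConf (which base_model.py already imports); the exact loading path inside MiniGPT-4's config machinery may differ:

    # Hypothetical, minimal way to inspect the value this commit changes.
    from omegaconf import OmegaConf

    cfg = OmegaConf.load("minigpt4/configs/models/minigpt_v2.yaml")
    # The keys sit under the top-level "model:" block, per the hunk header.
    print(cfg.model.llama_model)  # "ZebangCheng/Emotion-LLaMA"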
minigpt4/conversation/conversation.py
CHANGED
@@ -12,6 +12,7 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer
 from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
 from transformers import Wav2Vec2FeatureExtractor
+from transformers import AutoProcessor, AutoModel
 
 import dataclasses
 from enum import auto, Enum
@@ -263,11 +264,13 @@ class Chat:
         # model_file = "checkpoints/transformer/chinese-hubert-large"
         model_file = "ZebangCheng/chinese-hubert-large"
         feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_file)
+
         input_values = feature_extractor(samples, sampling_rate=sr, return_tensors="pt").input_values
         # print("input_values:", input_values)
 
         from transformers import HubertModel
-        hubert_model = HubertModel.from_pretrained(model_file)
+        # hubert_model = HubertModel.from_pretrained(model_file)
+        hubert_model = AutoModel.from_pretrained("ZebangCheng/chinese-hubert-large")
         hubert_model.eval()
         with torch.no_grad():
             hidden_states = hubert_model(input_values, output_hidden_states=True).hidden_states  # tuple of (B, T, D)
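Why the swap from HubertModel to AutoModel: AutoModel.from_pretrained() resolves the concrete architecture from the Hub repo's config.json, so the call keeps working regardless of which HuBERT variant the "ZebangCheng/chinese-hubert-large" checkpoint declares. A self-contained sketch of the same extraction path; the one-second dummy waveform and the 16 kHz sampling rate are assumptions for illustration (HuBERT-style models are trained on 16 kHz audio):

    import numpy as np
    import torch
    from transformers import AutoModel, Wav2Vec2FeatureExtractor

    model_file = "ZebangCheng/chinese-hubert-large"
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_file)
    hubert_model = AutoModel.from_pretrained(model_file)
    hubert_model.eval()

    sr = 16000
    samples = np.zeros(sr, dtype=np.float32)  # one second of silence as a stand-in

    input_values = feature_extractor(samples, sampling_rate=sr, return_tensors="pt").input_values
    with torch.no_grad():
        # output_hidden_states=True yields one (B, T, D) tensor per layer
        hidden_states = hubert_model(input_values, output_hidden_states=True).hidden_states
    print(len(hidden_states), hidden_states[-1].shape)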
minigpt4/models/base_model.py
CHANGED
@@ -13,7 +13,9 @@ from omegaconf import OmegaConf
 import numpy as np
 import torch
 import torch.nn as nn
-from transformers import LlamaTokenizer
+# from transformers import LlamaTokenizer
+from transformers import AutoTokenizer
+
 from peft import (
     LoraConfig,
     get_peft_model,
@@ -23,7 +25,8 @@ from peft import (
 from minigpt4.common.dist_utils import download_cached_file
 from minigpt4.common.utils import get_abs_path, is_url
 from minigpt4.models.eva_vit import create_eva_vit_g
-from minigpt4.models.modeling_llama import LlamaForCausalLM
+# from minigpt4.models.modeling_llama import LlamaForCausalLM
+from transformers.models.llama.modeling_llama import LlamaForCausalLM
 
 
 
@@ -172,7 +175,9 @@ class BaseModel(nn.Module):
     def init_llm(cls, llama_model_path, low_resource=False, low_res_device=0, lora_r=0,
                  lora_target_modules=["q_proj","k_proj"], **lora_kargs):
         logging.info('Loading LLAMA')
-        llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False)
+        llama_model_path
+        # llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False)
+        llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_path)
         llama_tokenizer.pad_token = "$$"
 
         if low_resource:
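Two things change in init_llm: the tokenizer is loaded through AutoTokenizer, which picks the right tokenizer class from the checkpoint's config instead of hard-coding LlamaTokenizer(use_fast=False), and LlamaForCausalLM now comes from transformers itself rather than the repo's vendored modeling_llama. A sketch of the resulting load-plus-LoRA flow, using the lora_r/lora_alpha values from the YAML diff and the default lora_target_modules from the signature above; the wrap_with_lora helper is hypothetical, not the repo's own code:

    from peft import LoraConfig, get_peft_model
    from transformers import AutoTokenizer
    from transformers.models.llama.modeling_llama import LlamaForCausalLM

    llama_model_path = "ZebangCheng/Emotion-LLaMA"  # repo id set in the YAML diff

    llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_path)
    llama_tokenizer.pad_token = "$$"  # same sentinel pad token as the diff

    def wrap_with_lora(model, lora_r=64, lora_alpha=16,
                       lora_target_modules=("q_proj", "k_proj")):
        # Mirrors the peft imports above; task_type marks a causal LM head.
        config = LoraConfig(
            r=lora_r,
            lora_alpha=lora_alpha,
            target_modules=list(lora_target_modules),
            task_type="CAUSAL_LM",
        )
        return get_peft_model(model, config)

    # Heavy download; shown only to complete the flow.
    llama_model = LlamaForCausalLM.from_pretrained(llama_model_path)
    llama_model = wrap_with_lora(llama_model)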