Praveen0309 committed on
Commit
123a5f5
·
1 Parent(s): 91c1fc8

final commit

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -16,16 +16,16 @@ app = Flask(__name__)
16
 
17
  warnings.filterwarnings('ignore')
18
 
19
- model_id = "HuggingFaceH4/vsft-llava-1.5-7b-hf-trl"
20
- quantization_config = BitsAndBytesConfig(load_in_4bit=True)
21
- base_model = LlavaForConditionalGeneration.from_pretrained(model_id, quantization_config=quantization_config, torch_dtype=torch.float16)
22
 
23
- # Load the PEFT Lora adapter
24
- peft_lora_adapter_path = "Praveen0309/llava-1.5-7b-hf-ft-mix-vsft-3"
25
- peft_lora_adapter = PeftModel.from_pretrained(base_model, peft_lora_adapter_path, adapter_name="lora_adapter")
26
- base_model.load_adapter(peft_lora_adapter_path, adapter_name="lora_adapter")
27
 
28
- processor = AutoProcessor.from_pretrained("HuggingFaceH4/vsft-llava-1.5-7b-hf-trl")
29
  # model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
30
  # tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
31
 
@@ -144,8 +144,8 @@ def get_bot_response():
144
  image = image_cache['image']
145
  # print(image)
146
  query = request.args.get('msg')
147
- # output = query
148
- output = google_response(image, query)
149
  return output
150
  else:
151
  return "Please upload an image to continue"
 
16
 
17
  warnings.filterwarnings('ignore')
18
 
19
+ # model_id = "HuggingFaceH4/vsft-llava-1.5-7b-hf-trl"
20
+ # quantization_config = BitsAndBytesConfig(load_in_4bit=True)
21
+ # base_model = LlavaForConditionalGeneration.from_pretrained(model_id, quantization_config=quantization_config, torch_dtype=torch.float16)
22
 
23
+ # # Load the PEFT Lora adapter
24
+ # peft_lora_adapter_path = "Praveen0309/llava-1.5-7b-hf-ft-mix-vsft-3"
25
+ # peft_lora_adapter = PeftModel.from_pretrained(base_model, peft_lora_adapter_path, adapter_name="lora_adapter")
26
+ # base_model.load_adapter(peft_lora_adapter_path, adapter_name="lora_adapter")
27
 
28
+ # processor = AutoProcessor.from_pretrained("HuggingFaceH4/vsft-llava-1.5-7b-hf-trl")
29
  # model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
30
  # tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
31
 
 
144
  image = image_cache['image']
145
  # print(image)
146
  query = request.args.get('msg')
147
+ output = query
148
+ # output = google_response(image, query)
149
  return output
150
  else:
151
  return "Please upload an image to continue"