ga89tiy committed on
Commit
71b35d2
·
1 Parent(s): 50161cc
Files changed (1) hide show
  1. example_code.py +2 -2
example_code.py CHANGED
@@ -22,7 +22,7 @@ def load_model_from_huggingface(repo_id):
22
  model_path = Path(model_path)
23
 
24
  tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, model_base='liuhaotian/llava-v1.5-7b',
25
- model_name="llava-v1.5-7b-task-lora_radialog_instruct_llava_biovil_unfrozen_2e-5_5epochs_v5_checkpoint-21000", load_8bit=False, $
26
 
27
 
28
  return tokenizer, model, image_processor, context_len
@@ -51,7 +51,7 @@ if __name__ == '__main__':
51
  findings = ', '.join(findings).lower().strip()
52
 
53
  conv = conv_vicuna_v1.copy()
54
- REPORT_GEN_PROMPT = f"<image>. Predicted Findings: {findings}. You are to act as a radiologist and write the finding section of a chest x-ray radiology report for this X-ray image and the given predi$
55
  print("USER: ", REPORT_GEN_PROMPT)
56
  conv.append_message("USER", REPORT_GEN_PROMPT)
57
  conv.append_message("ASSISTANT", None)
 
22
  model_path = Path(model_path)
23
 
24
  tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, model_base='liuhaotian/llava-v1.5-7b',
25
+ model_name="llava-v1.5-7b-task-lora_radialog_instruct_llava_biovil_unfrozen_2e-5_5epochs_v5_checkpoint-21000", load_8bit=False, load_4bit=False)
26
 
27
 
28
  return tokenizer, model, image_processor, context_len
 
51
  findings = ', '.join(findings).lower().strip()
52
 
53
  conv = conv_vicuna_v1.copy()
54
+ REPORT_GEN_PROMPT = f"<image>. Predicted Findings: {findings}. You are to act as a radiologist and write the finding section of a chest x-ray radiology report for this X-ray image and the given predicted findings. Write in the style of a radiologist, write one fluent text without enumeration, be concise and don't provide explanations or reasons."
55
  print("USER: ", REPORT_GEN_PROMPT)
56
  conv.append_message("USER", REPORT_GEN_PROMPT)
57
  conv.append_message("ASSISTANT", None)