merve HF Staff commited on
Commit
75d4b81
·
verified ·
1 Parent(s): 7bffede

Upload README.md with huggingface_hub

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -7,6 +7,7 @@ datasets:
7
  base_model:
8
  - Qwen/Qwen2.5-VL-7B-Instruct
9
  library_name: transformers
 
10
  ---
11
 
12
  <img alt="olmOCR Logo" src="https://huggingface.co/datasets/allenai/blog-images/resolve/main/olmocr/olmocr.png" width="242px" style="margin-left:'auto' margin-right:'auto' display:'block'">
@@ -37,7 +38,7 @@ A simple way to infer using transformers is as follows:
37
  ```python
38
  import torch
  from transformers import AutoModelForImageTextToText, AutoProcessor
39
 
40
- model_id = "allenai/olmOCR-7B-0725"
41
  processor = AutoProcessor.from_pretrained(model_id)
42
  model = AutoModelForImageTextToText.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda").eval()
43
 
@@ -68,7 +69,7 @@ messages = [
68
  "type": "image",
69
  "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolvlm_table.png",
70
  },
71
- {"type": "text", "text": "OCR"},
72
  ],
73
  }
74
  ]
@@ -78,7 +79,6 @@ text = processor.apply_chat_template(
78
  )
79
  inputs = processor.apply_chat_template(
80
  messages,
81
- video_fps=1,
82
  add_generation_prompt=True,
83
  tokenize=True,
84
  return_dict=True,
 
7
  base_model:
8
  - Qwen/Qwen2.5-VL-7B-Instruct
9
  library_name: transformers
10
+ pipeline_tag: image-text-to-text
11
  ---
12
 
13
  <img alt="olmOCR Logo" src="https://huggingface.co/datasets/allenai/blog-images/resolve/main/olmocr/olmocr.png" width="242px" style="margin-left:'auto' margin-right:'auto' display:'block'">
 
38
  ```python
39
  import torch
  from transformers import AutoModelForImageTextToText, AutoProcessor
40
 
41
+ model_id = "allenai/olmOCR-7B-0825"
42
  processor = AutoProcessor.from_pretrained(model_id)
43
  model = AutoModelForImageTextToText.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda").eval()
44
 
 
69
  "type": "image",
70
  "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolvlm_table.png",
71
  },
72
+ {"type": "text", "text": PROMPT},
73
  ],
74
  }
75
  ]
 
79
  )
80
  inputs = processor.apply_chat_template(
81
  messages,
 
82
  add_generation_prompt=True,
83
  tokenize=True,
84
  return_dict=True,