---
license: cc-by-2.0
---

## Usage of PathGen-CLIP

PathGen-CLIP is used through the `open_clip` library. First install the `open_clip_torch` package:

```
pip install open_clip_torch
```
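As a quick sanity check before loading the checkpoint, you can confirm that `open_clip` imports and that the ViT-B-16 architecture used by PathGen-CLIP is registered. A minimal sketch, assuming a recent `open_clip_torch` release:

```python
import open_clip

# Sanity check (assumes recent open_clip_torch releases expose these helpers):
# the package should import cleanly and list the ViT-B-16 architecture
# that the PathGen-CLIP checkpoint is loaded into below.
print(open_clip.__version__)
print('ViT-B-16' in open_clip.list_models())
```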
```python
import torch
from PIL import Image
import open_clip

# Load the PathGen-CLIP weights into the standard ViT-B-16 CLIP architecture
model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-16', pretrained='path/pathgen-clip.pt')
model.eval()  # model in train mode by default, impacts some models with BatchNorm or stochastic depth active
tokenizer = open_clip.get_tokenizer('ViT-B-16')

# Preprocess one pathology patch and tokenize the candidate text prompts
image = preprocess(Image.open("example.png")).unsqueeze(0)
text = tokenizer(["An H&E image of tumor patch", "An H&E image of normal patch"])

with torch.no_grad(), torch.cuda.amp.autocast():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

print("Label probs:", text_probs)
```
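Continuing from the snippet above, the scores in `text_probs` can be mapped back to a human-readable prediction. A minimal sketch, where the `labels` list is simply shorthand for the two prompts:

```python
# Pick the prompt with the highest probability for this patch.
labels = ["tumor", "normal"]
pred = text_probs.argmax(dim=-1).item()
print(f"Predicted class: {labels[pred]} (p={text_probs[0, pred].item():.3f})")
```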
## Cite

```
@misc{sun2024pathgen16m16millionpathology,
      title={PathGen-1.6M: 1.6 Million Pathology Image-text Pairs Generation through Multi-agent Collaboration},
      author={Yuxuan Sun and Yunlong Zhang and Yixuan Si and Chenglu Zhu and Zhongyi Shui and Kai Zhang and Jingxiong Li and Xingheng Lyu and Tao Lin and Lin Yang},
      year={2024},
      eprint={2407.00203},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2407.00203},
}
```