adaface-neurips committed
Commit 5604534 · 1 Parent(s): eaf48ba

Change face_app detection size

Files changed (1)
  1. lib/pipline_ConsistentID.py +7 -7
lib/pipline_ConsistentID.py CHANGED
@@ -12,7 +12,6 @@ from huggingface_hub.utils import validate_hf_hub_args
 from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline
-from diffusers.utils.import_utils import is_xformers_available
 from .functions import insert_markers_to_prompt, masks_for_unique_values, apply_mask_to_raw_image, tokenize_and_mask_noun_phrases_ends, prepare_image_token_idx
 from .functions import ProjPlusModel, masks_for_unique_values
 from .attention import Consistent_IPAttProcessor, Consistent_AttProcessor, FacialEncoder
@@ -39,11 +38,11 @@ class ConsistentIDPipeline(StableDiffusionPipeline):
         torch_device: Optional[Union[str, torch.device]] = None,
         torch_dtype: Optional[torch.dtype] = None,
     ):
-        super().to(torch_device, torch_dtype)
-        self.bise_net.to(torch_device, dtype=torch_dtype)
-        self.clip_encoder.to(torch_device, dtype=torch_dtype)
-        self.image_proj_model.to(torch_device, dtype=torch_dtype)
-        self.FacialEncoder.to(torch_device, dtype=torch_dtype)
+        super().to(torch_device, dtype=torch_dtype)
+        self.bise_net.to(torch_device, dtype=torch_dtype)
+        self.clip_encoder.to(torch_device, dtype=torch_dtype)
+        self.image_proj_model.to(torch_device, dtype=torch_dtype)
+        self.FacialEncoder.to(torch_device, dtype=torch_dtype)
         # If the unet is not released, the ip_layers should be moved to the specified device and dtype.
         if not isinstance(self.unet, edict):
             self.ip_layers.to(torch_device, dtype=torch_dtype)
@@ -75,7 +74,8 @@ class ConsistentIDPipeline(StableDiffusionPipeline):
 
         # face_app: FaceAnalysis object
         self.face_app = FaceAnalysis(name="buffalo_l", root='models/insightface', providers=['CPUExecutionProvider'])
-        self.face_app.prepare(ctx_id=0, det_size=(640, 640))
+        # The original det_size=(640, 640) is too large and face_app often fails to detect faces.
+        self.face_app.prepare(ctx_id=0, det_size=(512, 512))
 
         if not os.path.exists(consistentID_weight_path):
             ### Download pretrained models
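
For context, below is a minimal sketch (not part of this commit) of how the insightface FaceAnalysis detector configured by this change is typically initialized and run on its own, using the standard insightface API; the image path is a hypothetical placeholder.

    # Standalone sketch of the detector setup used by the pipeline after this commit.
    import cv2
    from insightface.app import FaceAnalysis

    # Same configuration as in ConsistentIDPipeline: buffalo_l model pack, CPU provider.
    face_app = FaceAnalysis(name="buffalo_l", root='models/insightface',
                            providers=['CPUExecutionProvider'])
    # det_size is the detector's input resolution; this commit lowers it from 640x640 to 512x512.
    face_app.prepare(ctx_id=0, det_size=(512, 512))

    image = cv2.imread("subject.jpg")   # hypothetical input image (BGR, as OpenCV loads it)
    faces = face_app.get(image)         # list of detected faces with bbox, keypoints, embedding
    print(f"detected {len(faces)} face(s)")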