K00B404 committed on
Commit
9adfffc
·
verified ·
1 Parent(s): e7b0eda

Update CLIP.py

Browse files
Files changed (1) hide show
  1. CLIP.py +10 -2
CLIP.py CHANGED
@@ -11,7 +11,7 @@ def load(tokenizer_path = "tokenizer", text_encoder_path = "text_encoder"):
11
  config_path = f"./{text_encoder_path}/config.json"
12
 
13
  # Load tokenizer
14
- tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)
15
 
16
  # Load CLIPTextModelWithProjection from the config file and safetensor
17
  clip_model = CLIPTextModelWithProjection.from_pretrained(
@@ -25,4 +25,12 @@ def load(tokenizer_path = "tokenizer", text_encoder_path = "text_encoder"):
25
  clip_model.load_state_dict(state_dict)
26
  clip_model = clip_model.to(device)
27
 
28
- return clip_model, tokenizer
 
 
 
 
 
 
 
 
 
11
  config_path = f"./{text_encoder_path}/config.json"
12
 
13
  # Load tokenizer
14
+ clip_tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)
15
 
16
  # Load CLIPTextModelWithProjection from the config file and safetensor
17
  clip_model = CLIPTextModelWithProjection.from_pretrained(
 
25
  clip_model.load_state_dict(state_dict)
26
  clip_model = clip_model.to(device)
27
 
28
+ return clip_model, clip_tokenizer
29
+
30
def load_vae(vae_path='vae'):
    """Load a pretrained AutoencoderKL VAE.

    Parameters
    ----------
    vae_path : str
        Directory (or hub id) holding the pretrained VAE weights/config.
        Defaults to ``'vae'``.

    Returns
    -------
    AutoencoderKL
        The loaded VAE model.
    """
    # Bug fix: the original passed the undefined name `vae_pathj` (typo),
    # which raised NameError on every call instead of using the argument.
    return AutoencoderKL.from_pretrained(vae_path)
32
+
33
# Example function for processing prompts
def encode_prompt(prompt, tokenizer, clip_model):
    """Tokenize *prompt* and run it through the CLIP text model.

    Parameters
    ----------
    prompt : str
        The text prompt to encode.
    tokenizer : CLIPTokenizer
        Tokenizer producing PyTorch tensors for the model.
    clip_model : CLIPTextModelWithProjection
        Text encoder whose last hidden state is returned.

    Returns
    -------
    torch.Tensor
        The encoder's ``last_hidden_state`` for the tokenized prompt.
    """
    tokenized = tokenizer(prompt, return_tensors="pt")
    encoded = clip_model(**tokenized)
    return encoded.last_hidden_state