TiankaiHang committed
Commit e031b11 · 1 Parent(s): 6c26e0d
stable_diffusion/ldm/modules/encoders/modules.py CHANGED
@@ -152,7 +152,10 @@ class FrozenCLIPEmbedder(AbstractEncoder):
     def forward(self, text):
         batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                         return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
-        tokens = batch_encoding["input_ids"].to(self.device)
+        if torch.cuda.is_available():
+            tokens = batch_encoding["input_ids"].to(self.device)
+        else:
+            tokens = batch_encoding["input_ids"]
         outputs = self.transformer(input_ids=tokens)
 
         z = outputs.last_hidden_state
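
Note: the change above lets FrozenCLIPEmbedder.forward run on CPU-only machines by skipping the .to(self.device) move when CUDA is unavailable. The following is a minimal standalone sketch of the same fallback pattern (not part of the commit; the helper name, the default max_length, and the tokenizer checkpoint in the usage comment are illustrative assumptions):

import torch
from transformers import CLIPTokenizer

def tokenize_to_device(text, tokenizer, device, max_length=77):
    # Tokenize exactly as in FrozenCLIPEmbedder.forward.
    batch_encoding = tokenizer(
        text, truncation=True, max_length=max_length, return_length=True,
        return_overflowing_tokens=False, padding="max_length", return_tensors="pt",
    )
    # Only move the input ids to the requested device when CUDA is present;
    # otherwise keep them on CPU, mirroring the committed change.
    if torch.cuda.is_available():
        return batch_encoding["input_ids"].to(device)
    return batch_encoding["input_ids"]

# Usage sketch:
# tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
# tokens = tokenize_to_device(["a photo of a cat"], tokenizer, "cuda")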