HatmanStack committed on
Commit
c8d5c42
·
1 Parent(s): 4b05d70

checkpoint

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -15,7 +15,7 @@ from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
15
  device = "cuda" if torch.cuda.is_available() else "cpu"
16
 
17
  # Model checkpoint
18
- model_checkpoint = "gokaygokay/Flux-Prompt-Enhance"
19
 
20
  # Tokenizer
21
  tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
@@ -58,7 +58,7 @@ class PromptEnhancer:
58
  self.device = "cuda" if torch.cuda.is_available() else "cpu"
59
 
60
  # Model checkpoint
61
- self.model_checkpoint = "gokaygokay/Flux-Prompt-Enhance"
62
 
63
  # Tokenizer and Model
64
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_checkpoint)
 
15
  device = "cuda" if torch.cuda.is_available() else "cpu"
16
 
17
  # Model checkpoint
18
+ model_checkpoint = "Hatman/Flux-Prompt-Enhance"
19
 
20
  # Tokenizer
21
  tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
 
58
  self.device = "cuda" if torch.cuda.is_available() else "cpu"
59
 
60
  # Model checkpoint
61
+ self.model_checkpoint = "Hatman/Flux-Prompt-Enhance"
62
 
63
  # Tokenizer and Model
64
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_checkpoint)