KingNish committed on
Commit
a38d8a4
Β·
verified Β·
1 Parent(s): 3f89dc7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -51,7 +51,7 @@ pipe_edit.to("cuda")
51
 
52
def promptifier(prompt):
    """Ask Mistral-7B-Instruct (via the HF Inference API) to rewrite *prompt*
    into an enhanced Stable Diffusion XL image-generation prompt.

    The end-of-sequence token emitted by the model is stripped from the
    returned text.
    """
    llm = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
    # Runtime instruction string reproduced exactly as in the original.
    system_msg = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, you have to optiomize prompt and also add some keywords like, 4k, realistic, featuristic according to prompt and also break prompt into sub-lines using comma, Your task is to reply with final optimized prompt only. Just reply with prompt only.[USER]"
    request = f"{system_msg} {prompt} [PROMPT]"
    token_stream = llm.text_generation(request, max_new_tokens=80, stream=True, details=True, return_full_text=False)
    # Collect streamed tokens, skipping the explicit EOS marker.
    pieces = []
    for chunk in token_stream:
        text = chunk.token.text
        if text != "</s>":
            pieces.append(text)
    return "".join(pieces)
 
51
 
52
def promptifier(prompt):
    """Rewrite a user prompt into an optimized Stable Diffusion XL prompt.

    Sends *prompt* to Mistral-7B-Instruct through the Hugging Face Inference
    API with a fixed system instruction, streams the generation back, and
    joins the tokens into the final optimized prompt.

    Args:
        prompt: Raw user prompt to be enhanced for image generation.

    Returns:
        The model's optimized prompt text, with the end-of-sequence token
        ("</s>") stripped out.
    """
    client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
    # Fixed typos/grammar in the LLM instruction ("optiomize" -> "optimize",
    # "featuristic" -> "futuristic", "to more better prompt" -> "into a
    # better prompt") so the model receives a clean, unambiguous instruction.
    system_instructions1 = (
        "<s>[SYSTEM] Your task is to modify the prompt by USER into a better "
        "prompt for Image Generation in Stable Diffusion XL. You have to "
        "optimize the prompt and also add some keywords like 4k, realistic, "
        "futuristic according to the prompt. Your task is to reply with the "
        "final optimized prompt only. Just reply with the optimized prompt "
        "only.[USER]"
    )
    formatted_prompt = f"{system_instructions1} {prompt} [PROMPT]"
    stream = client1.text_generation(
        formatted_prompt,
        max_new_tokens=80,
        stream=True,
        details=True,
        return_full_text=False,
    )
    # Join streamed tokens, dropping the explicit end-of-sequence marker.
    return "".join(
        response.token.text for response in stream if response.token.text != "</s>"
    )