KingNish committed on
Commit
a8a6e89
·
verified ·
1 Parent(s): a60ab8d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -51,7 +51,7 @@ pipe_edit.to("cuda")
51
 
52
  def promptifier(prompt):
53
  client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
54
- system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, you have to optiomize prompt and also add some keywords like, 4k, realistic, featuristic according to prompt. Your task is to reply with final optimized prompt only. Just reply with optimized prompt only.[USER]"
55
  formatted_prompt = f"{system_instructions1} {prompt} [PROMPT]"
56
  stream = client1.text_generation(formatted_prompt, max_new_tokens=80, stream=True, details=True, return_full_text=False)
57
  return "".join([response.token.text for response in stream if response.token.text != "</s>"])
 
51
 
52
  def promptifier(prompt):
53
  client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
54
+ system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, you have to optiomize prompt and also add some keywords like: cute, masterpiece, 4k, realistic, featuristic, or styles according to prompt, or anything good which help in generating better image like use want \n Your task is to reply with final optimized prompt only. If you get big prompt make it concise.[USER]"
55
  formatted_prompt = f"{system_instructions1} {prompt} [PROMPT]"
56
  stream = client1.text_generation(formatted_prompt, max_new_tokens=80, stream=True, details=True, return_full_text=False)
57
  return "".join([response.token.text for response in stream if response.token.text != "</s>"])