chore: clean up application with commented optionals
app.py CHANGED
@@ -35,6 +35,7 @@ from langchain.chains import LLMChain
 from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
 from langchain_core.prompts import PromptTemplate
 from langchain_openai import OpenAI
+from transformers import AutoTokenizer
 
 from io import BytesIO
 from time import sleep
@@ -207,7 +208,7 @@ text_to_speech_pipe.model.enable_cpu_offload()
 text_to_speech_pipe.model.use_flash_attention_2=True
 text_to_speech_pipe.model.pad_token_id=0 # 50257
 
-
+
 
 tokenizer = AutoTokenizer.from_pretrained("suno/bark-small")
 #print("suno/bark-small tokenizer pad_token_id: ", tokenizer.pad_token_id) # 0
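Note on the hunk above: the AutoTokenizer import added at line 38 is what the suno/bark-small tokenizer load depends on. A minimal sketch of how the pieces fit together, assuming text_to_speech_pipe is a standard transformers text-to-speech pipeline around Bark (its construction is not part of this diff):

# Sketch under assumptions: the diff never shows how text_to_speech_pipe is built.
from transformers import AutoTokenizer, pipeline

text_to_speech_pipe = pipeline("text-to-speech", model="suno/bark-small")
text_to_speech_pipe.model.pad_token_id = 0  # Bark's pad token id; 50257 would be the GPT-2-style default

tokenizer = AutoTokenizer.from_pretrained("suno/bark-small")
print(tokenizer.pad_token_id)  # 0, matching the value forced on the model above

out = text_to_speech_pipe("Hello from Bark")  # dict with an "audio" array and "sampling_rate"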
@@ -296,16 +297,16 @@ image_generation_tool_fast = Tool.from_space(
 )
 
 
-
-
+ceo_model = load_model("LiteLLMModel", "gpt-4o") # or anthropic/claude-3-sonnet
 
+"""
 ceo_model = HfApiModel(
     max_tokens=2096, # 8096 for manager
     temperature=0.5,
     model_id= 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud', # "meta-llama/Llama-3.3-70B-Instruct", # 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud', # same as Qwen/Qwen2.5-Coder-32B-Instruct
     custom_role_conversions=None,
 )
-
+"""
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
 
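This hunk swaps the dedicated inference endpoint (HfApiModel) for a load_model call and keeps the old block behind a docstring-style comment. load_model is not defined anywhere in this diff; assuming it maps the "LiteLLMModel" string to smolagents' LiteLLMModel class, a direct equivalent would be:

# Hedged sketch: assumes load_model("LiteLLMModel", "gpt-4o") resolves to smolagents' LiteLLMModel.
from smolagents import LiteLLMModel

ceo_model = LiteLLMModel(model_id="gpt-4o")  # routed through LiteLLM; needs OPENAI_API_KEY set
# ceo_model = LiteLLMModel(model_id="anthropic/claude-3-sonnet")  # the commented alternative; needs ANTHROPIC_API_KEY

The prompts.yaml load below the swap is untouched; yaml.safe_load returns a plain dict of prompt templates.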
@@ -362,5 +363,4 @@ agent.visualize()
 # prompt = ("navigate to a random wikipedia page and give me a summary of the content, then make a single image representing all the content")
 # agent.run(prompt)
 
-GradioUI(agent).launch()
-#GradioUIImage(agent).launch()
+GradioUI(agent).launch()
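The final hunk drops the commented GradioUIImage variant and keeps the plain launch. GradioUI here matches the class smolagents ships; a self-contained version of the launch path, with the agent construction (not shown in this diff) filled in as an assumption:

# Sketch: the agent construction is assumed; only GradioUI(agent).launch() comes from the diff.
from smolagents import CodeAgent, GradioUI, LiteLLMModel

agent = CodeAgent(tools=[], model=LiteLLMModel(model_id="gpt-4o"))
GradioUI(agent).launch()  # serves the chat UI, by default on http://127.0.0.1:7860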