initial commit
- .gitignore +4 -0
- agent.py +199 -0
- app.py +19 -0
- requirements.txt +6 -0
.gitignore
ADDED
@@ -0,0 +1,4 @@
+.env
+.vscode
+*.ipynb
+notes.txt
agent.py
ADDED
@@ -0,0 +1,199 @@
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_community.tools.tavily_search import TavilySearchResults
+import requests
+import os
+from typing import List, TypedDict
+from langgraph.types import Command
+from typing import Literal
+from langgraph.graph import StateGraph, START, END
+import boto3
+
+import streamlit as st
+
+# API keys are read from AWS SSM Parameter Store at import time.
+ssm = boto3.client(
+    'ssm',
+    region_name="us-east-2",
+    # aws_access_key_id=os.environ['AWS_ACCESS_KEY'],
+    # aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY']
+)
+
+os.environ['TAVILY_API_KEY'] = ssm.get_parameter(Name="TAVILY_API_KEY", WithDecryption=True)['Parameter']['Value']
+
+def get_platform_tips(state) -> Command[Literal['web_search']]:
+    """Conduct a web search to find up-to-date information on how to write an effective post for the provided platform."""
+    tavily_tool = TavilySearchResults(max_results=5)
+    tavily_output = tavily_tool.invoke(f"tips on how to write an effective post on {state['platform']}")
+    prompt = f"""
+    Summarize the tips provided in {tavily_output}. These tips will be used to generate a {state['platform']} post.
+    Output as plain text.
+    """
+    response = model.invoke(prompt).content
+    return Command(update={"tips": response}, goto="web_search")
+
+def web_search(state) -> Command[Literal['generate_post']]:
+    """Conduct a web search to find up-to-date information about a provided topic to be used for a social media post."""
+    tavily_tool = TavilySearchResults(max_results=5)
+    response = tavily_tool.invoke(state["topic"])
+    return Command(update={"tavily_results": response}, goto="generate_post")
+
+def generate_social_media_post(state) -> Command[Literal["evaluate_engagement"]]:
+    """Generate a social media post for a B2B bank."""
+    prompt = f"""
+    You are a social media strategist for a B2B bank. Generate a {state["platform"]} post.
+    The post should:
+    - Be engaging but professional.
+    - Provide value to corporate clients.
+    - Focus on {state["topic"]}.
+    - Incorporate information from {state["tavily_results"]}
+
+    Output as plain text.
+    """
+    response = model.invoke(prompt)
+    return Command(update={"post": response.content}, goto="evaluate_engagement")
+
+def evaluate_engagement(state) -> Command[Literal["evaluate_tone"]]:
+    """Assess how engaging the post is for LinkedIn/Instagram."""
+    prompt = f"""
+    Score the following post on engagement (1-10) based on the provided social media platform.
+    Consider clarity, readability, and a compelling call-to-action.
+
+    Platform: {state["platform"]}
+    Post: {state["post"]}
+
+    Respond with just a number.
+    """
+    score = model.invoke(prompt)
+    return Command(update={"engagement_score": score.content}, goto="evaluate_tone")
+
+def evaluate_tone(state) -> Command[Literal["evaluate_clarity"]]:
+    """Check if the post maintains a professional yet engaging tone."""
+    prompt = f"""
+    Score the post's tone (1-10). Ensure it is:
+    - Professional but not too rigid.
+    - Trustworthy and aligned with B2B financial services.
+    - Aligned with the specified platform.
+
+    Platform: {state["platform"]}
+    Post: {state["post"]}
+
+    Respond with just a number.
+    """
+    score = model.invoke(prompt)
+    return Command(update={"tone_score": score.content}, goto="evaluate_clarity")
+
+def evaluate_clarity(state) -> Command[Literal["revise_if_needed"]]:
+    """Ensure the post is clear and not overly technical."""
+    prompt = f"""
+    Score the post on clarity (1-10).
+    - Avoids jargon.
+    - Easy to read for busy corporate professionals.
+    - Appropriate for the social media platform.
+
+    Platform: {state["platform"]}
+    Post: {state["post"]}
+
+    Respond with just a number.
+    """
+    score = model.invoke(prompt)
+    return Command(update={"clarity_score": score.content}, goto="revise_if_needed")
+
+def revise_if_needed(state) -> Command[Literal["get_image"]]:
+    """Revise the post if the average evaluation score is below a threshold."""
+    scores = [int(state["engagement_score"]), int(state["tone_score"]), int(state["clarity_score"])]
+    avg_score = sum(scores) / len(scores)
+
+    if avg_score < 7:  # Arbitrary threshold for revision
+        prompt = f"""
+        Revise this post to improve clarity, engagement, and tone:
+
+        {state["post"]}
+
+        Improve based on the following scores:
+        Engagement: {state["engagement_score"]}
+        Tone: {state["tone_score"]}
+        Clarity: {state["clarity_score"]}
+        """
+        revised_post = model.invoke(prompt)
+        return Command(update={"post": revised_post.content}, goto="get_image")
+
+    return Command(goto="get_image")
+
+def fetch_image(state) -> Command[Literal[END]]:
+    """Fetch images from the Pexels API based on the provided topic."""
+    prompt = f"""
+    You are a search optimization assistant. Your task is to take a topic and improve it to ensure the best image results from an image search API like Unsplash. Follow these steps:
+
+    1. **Normalize the input**: Convert all text to lowercase and remove special characters (except for spaces).
+    2. **Add more descriptive terms**: If the query is broad (e.g., "nature"), add more specific keywords like "landscape" or "outdoor" to help refine the search.
+    3. **Use synonyms and related terms**: For terms that could have multiple meanings or common synonyms, expand the query to include variations. For example, if the user queries "car", you can add "vehicle" or "automobile".
+    4. **Specify style and tone**: If the user provides a vague description, suggest adding words to define the style or mood of the image, such as "peaceful", "dramatic", or "colorful".
+    5. **Categorize the query**: If applicable, categorize the query into domains like "nature", "architecture", or "people" and add related terms (e.g., "urban", "portrait", "scenic").
+
+    **Example Inputs and Outputs:**
+
+    1. Input: "sunset over a beach"
+       Output: "sunset beach ocean horizon landscape"
+
+    2. Input: "car"
+       Output: "car vehicle automobile road transport"
+
+    3. Input: "nature"
+       Output: "nature landscape outdoor scenic green"
+
+    Topic: {state['topic']}
+    """
+
+    url = "https://api.pexels.com/v1/search"
+
+    params = {
+        "query": model.invoke(prompt).content,
+        "per_page": 5,
+        "page": 1
+    }
+
+    headers = {
+        "Authorization": ssm.get_parameter(Name="PEXEL_API_KEY", WithDecryption=True)['Parameter']['Value']
+    }
+
+    response = requests.get(url, headers=headers, params=params)
+    if response.status_code == 200:
+        data = response.json()
+        urls = []
+        for photo in data['photos']:
+            urls.append(photo['url'])
+
+        return Command(update={"image_url": urls}, goto=END)
+    return Command(goto=END)
+
+class State(TypedDict):
+    topic: str
+    platform: str
+    tips: str
+    tavily_results: List[dict]
+    post: str
+    engagement_score: int
+    tone_score: int
+    clarity_score: int
+    image_url: str
+
+model = ChatGoogleGenerativeAI(
+    model="gemini-1.5-flash",
+    temperature=0,
+    max_tokens=None,
+    timeout=None,
+    max_retries=2,
+    google_api_key=ssm.get_parameter(Name="GOOGLE_API_KEY", WithDecryption=True)['Parameter']['Value']
+)
+
+# Nodes route themselves via Command(goto=...), so only the entry edge is declared explicitly.
+workflow = StateGraph(State)
+workflow.add_node("get_tips", get_platform_tips)
+workflow.add_node("web_search", web_search)
+workflow.add_node("generate_post", generate_social_media_post)
+workflow.add_node("evaluate_engagement", evaluate_engagement)
+workflow.add_node("evaluate_tone", evaluate_tone)
+workflow.add_node("evaluate_clarity", evaluate_clarity)
+workflow.add_node("revise_if_needed", revise_if_needed)
+workflow.add_node("get_image", fetch_image)
+
+workflow.add_edge(START, "get_tips")
+graph = workflow.compile()
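For quick checks outside the Streamlit front end, the compiled graph can be invoked directly. A minimal sketch, assuming boto3 can already read the TAVILY_API_KEY, GOOGLE_API_KEY, and PEXEL_API_KEY parameters from SSM; the topic string is only an illustrative placeholder:

    # Sketch: exercise the agent pipeline without the UI (assumes SSM access is configured).
    import agent as ag

    result = ag.graph.invoke({"topic": "instant payments for corporate treasurers", "platform": "LinkedIn"})
    print(result["post"])
    print(result.get("image_url", []))  # may be missing if the Pexels request failed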
app.py
ADDED
@@ -0,0 +1,19 @@
+import streamlit as st
+import agent as ag
+
+st.markdown("<h1 style='text-align: center;'>Social Media Content Generator</h1>", unsafe_allow_html=True)
+
+with st.form("platform"):
+    topic = st.text_input(label="Topic")
+    platform = st.radio(label="Select a platform", options=["LinkedIn", "Instagram"])
+
+    if st.form_submit_button("Generate Content", use_container_width=True):
+        if topic:
+            with st.spinner(text="In progress...", show_time=True):
+                input_data = {"topic": topic, "platform": platform}
+                output = ag.graph.invoke(input_data)
+            st.markdown(output['post'])
+            markdown_links = " ".join([f"[Image {i+1}]({url})" for i, url in enumerate(output.get('image_url', []))])  # image_url may be absent if the Pexels request failed
+            st.markdown(f"**Image URLs:** {markdown_links}")
+        else:
+            st.error("Please provide a topic.")
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+langchain-google-genai==2.0.11
+dotenv==0.9.9
+langchain-community==0.3.19
+langgraph==0.3.8
+streamlit==1.43.2
+boto3==1.37.13
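To run the app locally, a minimal sketch, assuming the pinned dependencies are installed and the environment holds AWS credentials that can decrypt the SSM parameters referenced in agent.py:

    pip install -r requirements.txt
    streamlit run app.py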