Spaces:
No application file
No application file
Commit
·
ca330d5
1
Parent(s):
6c95d0c
Upload 7 files
Browse files- Logo.png +0 -0
- linkedin_post_generation.py +15 -0
- load_model.py +86 -0
- main.py +434 -0
- meta_content_creation.py +94 -0
- outline_generation.py +67 -0
- product_description_generation.py +33 -0
Logo.png
ADDED
|
linkedin_post_generation.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain


def linkedIn_post_gen(topic, tone_of_voice, llm):
    """Generate a LinkedIn post on a given topic with a given tone.

    Args:
        topic (str): Subject the post should cover.
        tone_of_voice (str): Desired tone (e.g. 'Professional', 'Funny').
        llm: LangChain-compatible language model used to run the chain.

    Returns:
        str: The generated LinkedIn post text.
    """
    # Task description; it is injected into the template's {text_input} slot.
    # (Fixed typo: "engagging" -> "engaging".)
    linkedIn_post_prompt = f"Write an engaging LinkedIn Post on {topic}. The tone should be {tone_of_voice}."

    linkedIn_post_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are a content creator and LinkedIn Posts writer :\n{text_input}\nLinkedIn Post:")

    linkedIn_post_extraction_chain = LLMChain(llm=llm, prompt=linkedIn_post_promptTemp)
    linkedIn_post = linkedIn_post_extraction_chain.run(linkedIn_post_prompt)

    return linkedIn_post
|
load_model.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from langchain.llms import GooglePalm, LlamaCpp


def call_palm(google_api_key, temperature=0, max_tokens=8000, top_p=0.95, top_k=40, n_batch=9, repeat_penalty=1.1, n_ctx=8000):
    """Instantiate a Google Palm language model.

    The returned model can be used to generate text, translate languages,
    write different kinds of creative content, and answer questions in an
    informative way.

    Args:
        google_api_key (str): Required. The Google API key for the Palm model.
        temperature (float): Controls randomness of the generated text. Higher
            values produce more creative and varied text, but may be less accurate.
        max_tokens (int): Maximum number of tokens to generate (forwarded to
            the model as ``max_output_tokens``).
        top_p (float): Controls diversity of the generated text. Higher values
            produce more diverse but possibly less coherent text.
        top_k (int): Number of tokens considered when generating text. Higher
            values produce more accurate but possibly less creative text.
        n_batch (int): Number of batches used when generating text. Higher
            values are faster but may be less accurate.
        repeat_penalty (float): Penalty for repeating tokens. Higher values
            produce more diverse but possibly less fluent text.
        n_ctx (int): Context length used to generate text. Higher values
            produce more coherent text but generate more slowly.

    Returns:
        GooglePalm: A configured Google Palm language-model object.
    """
    google_palm_model = GooglePalm(
        google_api_key=google_api_key,
        temperature=temperature,
        max_output_tokens=max_tokens,
        top_p=top_p,
        top_k=top_k,
        n_batch=n_batch,
        repeat_penalty=repeat_penalty,
        n_ctx=n_ctx
    )

    return google_palm_model
def call_llama2(model_path, temperature=0, max_tokens=8192, top_p=0.95, top_k=40, n_batch=9, repeat_penalty=1.1, n_ctx=8192):
    """Instantiate a Meta Llama-2 13B model via llama.cpp.

    The returned model can be used to generate text, translate languages,
    write different kinds of creative content, and answer questions in an
    informative way.

    Args:
        model_path (str): Required. Path to the Llama model file on disk.
        temperature (float): Controls randomness of the generated text. Higher
            values produce more creative and varied text, but may be less accurate.
        max_tokens (int): Maximum number of tokens to generate.
        top_p (float): Controls diversity of the generated text. Higher values
            produce more diverse but possibly less coherent text.
        top_k (int): Number of tokens considered when generating text. Higher
            values produce more accurate but possibly less creative text.
        n_batch (int): Number of batches used when generating text. Higher
            values are faster but may be less accurate.
        repeat_penalty (float): Penalty for repeating tokens. Higher values
            produce more diverse but possibly less fluent text.
        n_ctx (int): Context length used to generate text. Higher values
            produce more coherent text but generate more slowly.

    Returns:
        LlamaCpp: A configured llama.cpp language-model object.
    """
    llm = LlamaCpp(
        model_path=model_path,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        top_k=top_k,
        n_batch=n_batch,
        repeat_penalty=repeat_penalty,
        n_ctx=n_ctx
    )

    return llm
main.py
ADDED
|
@@ -0,0 +1,434 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Streamlit entry point for the Feather AI content-generation app.

Presents a sidebar of content templates (articles, social posts, ads,
emails, workout plans, ...) and dispatches each choice to the matching
generator module, all backed by a single Google Palm LLM instance.
"""
from remove_astricks import remove_ast
from load_model import call_palm
from keywords_generation import keywords_generator, filter_keywords, process_keywords
from idea_generation import ideas_generator, filter_ideas
from outline_generation import outlines_generator, filtered_outlines
from article_generation import full_article, rephrase
from product_description_generation import product_description_gen
from x_thread_generation import X_thread_gen, X_thread_gen_intro
from linkedin_post_generation import linkedIn_post_gen
from insta_image_caption_generation import img2text, generate_InstaCap
from facebook_post_generation import facebook_post_gen
from facebook_ads_generation import facebook_ads_gen
from facebook_campaign import facbook_camp_gen, social_media_camp_gen
from linkedIn_ads_generation import linkedIn_ads_gen
from email_marketing_campaigns_generation import email_marketing_campaigns_gen
from workout_plan_generation import workout_plan_gen
from landing_page_generation import landing_page_gen
from blog_post_generation import blog_post_gen
from x_bio_creation import x_bio_gen
from x_retweet_commenting_generation import x_retweet_commenting_gen
from article_conversion_to_x_thread import article_to_x_thread_gen
from x_campaign_generation import x_camp_gen
from blog_ideas_generation import blog_idea_gen
from blog_ideas_description_generation import blog_idea_desc_gen
from blog_post_tags_generation import blog_tags_gen
import X_content_creation
import blog_content_creation
import meta_content_creation
import email_writer

import streamlit as st
from dotenv import load_dotenv, find_dotenv
import os
import time

# Load the Google API key from a .env file into the environment.
load_dotenv(find_dotenv())
google_api_key = os.environ['GOOGLE_API_KEY']


if __name__ == '__main__':

    # Single shared LLM instance used by every generator below.
    llm = call_palm(google_api_key)

    tone_of_voice_list = ('Excited', 'Professional', 'Encouraging', 'Funny', 'Dramatic', 'Witty', 'Sarcastic', 'Engaging', 'Creative')
    creativity_list = ('Original', 'Creative', 'Visionary')

    with st.sidebar:

        st.image('Logo.png')
        choice = st.radio("Navigation", ["Articles and Blogs", "Product Description", "LinkedIn Post", "LinkedIn Ads", "Gym Workout Plan", "Landing Page", "X Content Creation", "Meta Content", "Emails"])
        st.info("Generate your Content with Feather AI in Seconds..")

    if choice == 'Product Description':

        product_name = st.text_input('Product Name')
        product_desc = st.text_area('Product Description')
        tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)

        if product_name and product_desc and tone_of_voice:

            if st.button('Generate'):

                product_description = product_description_gen(product_name, product_desc, tone_of_voice, llm)
                st.markdown(product_description)

    elif choice == 'LinkedIn Post':

        topic = st.text_input("Topic")
        tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)

        if topic and tone_of_voice:

            if st.button('Generate'):

                linkedIn_post = linkedIn_post_gen(topic, tone_of_voice, llm)
                st.markdown(linkedIn_post)

    elif choice == "LinkedIn Ads":

        product_name = st.text_input("Product Name")
        product_desc = st.text_area("Product Description")
        target_audience = st.text_input("Target Audience")
        target_keywords = st.text_input("Target Keywords")

        if product_name and product_desc and target_audience and target_keywords:

            if st.button("Generate"):

                linkedIn_ad = linkedIn_ads_gen(product_name, product_desc, target_audience, target_keywords, llm)
                st.markdown(linkedIn_ad)

    elif choice == "Gym Workout Plan":

        fitness_level_list = ('Beginner', 'Intermediate', 'Advanced', 'Elite')
        health_consd_list = ('Cutting Phase', 'Bulking Phase', 'Maintenance Phase', 'Lean Muscle Gain')
        routine_list = ('Bro Split', 'Push Pull Leg', 'Upper and Lower', 'Full Body')

        fitness_level = st.selectbox('Fitness Level', fitness_level_list)
        # Fixed label typos: "Avaliable" -> "Available".
        days = st.slider('Available Days Per Week', 1, 7, 4)
        hours = st.slider('Available Hours Per Day', 1, 6, 2)
        health_consd = st.selectbox('Health Considerations', health_consd_list)
        routine = st.selectbox("Preferred Routine", routine_list)
        my_goals = st.text_input('Goals')

        if fitness_level and days and hours and health_consd and routine and my_goals:

            if st.button('Generate'):

                workout_plan = workout_plan_gen(my_goals, fitness_level, days, hours, health_consd, routine, llm)
                st.markdown(workout_plan)

    elif choice == "Landing Page":

        # Landing-page generation is disabled for now; inputs kept for reference.
        #product_name = st.text_input('Product Name')
        #product_desc = st.text_area('Description')
        #target_audience = st.text_input('Target Audience')
        #goal = st.text_input('Goal')
        st.title('Not Working')
        #if product_name and product_desc and target_audience and goal:

            #if st.button('Generate'):

                #landing_page = landing_page_gen(product_name, product_desc, target_audience, goal, llm)
                #st.markdown(landing_page)

    elif choice == 'X Content Creation':

        X_content = X_content_creation.X_Generator(llm)

        x_choices_list = ('X thread', 'X Retweet Commenting', 'X Campaign', 'X Bio Creation', 'Article Conversion to X Thread')

        x_choices = st.selectbox('Choose X Template', x_choices_list)

        if x_choices == x_choices_list[0]:

            topic = st.text_input('Topic')
            num_tweets = st.slider('Number of Thread tweet', 1, 10, 5)
            tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)

            if topic and num_tweets and tone_of_voice:

                if st.button('Generate'):

                    X_thread = X_content.X_thread_gen(topic, num_tweets, tone_of_voice)
                    intro = X_content.X_thread_gen_intro(topic, X_thread, tone_of_voice)
                    st.markdown(intro)
                    st.markdown(X_thread)

        elif x_choices == x_choices_list[1]:

            tweet = st.text_area('Tweet')
            tone_of_voice = st.selectbox('Tone of voice', tone_of_voice_list)

            if tweet and tone_of_voice:

                if st.button('Generate'):

                    x_retweet_comment = X_content.x_retweet_commenting_gen(tweet, tone_of_voice)
                    st.markdown(x_retweet_comment)

        elif x_choices == x_choices_list[2]:

            product_name = st.text_input('Product Name')
            product_desc = st.text_area('Product Description')
            goal = st.text_input('Goal')

            if product_name and product_desc and goal:

                if st.button('Generate'):

                    x_camp = X_content.x_camp_gen(product_name, product_desc, goal)
                    st.markdown(x_camp)

        elif x_choices == x_choices_list[3]:

            info = st.text_area('info')
            tone_of_voice = st.selectbox('Tone of voice', tone_of_voice_list)

            if info and tone_of_voice:

                if st.button('Generate'):

                    x_bio = X_content.x_bio_gen(info, tone_of_voice, llm)
                    st.markdown(x_bio)

        elif x_choices == x_choices_list[4]:

            article = st.text_area('Article')

            if article:

                if st.button('Generate'):

                    article_to_x_thread = X_content.article_to_x_thread_gen(article)
                    st.markdown(article_to_x_thread)

    elif choice == 'Articles and Blogs':

        blog_content = blog_content_creation.blog_content_generation(llm)

        blog_choices_list = ('Article Writer', 'Blog Post', 'Blog Ideas', 'Blog Tags', 'Blog Ideas description')

        blog_choices = st.selectbox('Choose Blog Template', blog_choices_list)

        if blog_choices == blog_choices_list[0]:

            topic = st.text_input('Topic')
            tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)

            if topic:

                # Pipeline: keywords -> ideas -> user picks one -> outline -> article.
                keywords = keywords_generator(topic, llm)
                filtered_keywords = filter_keywords(keywords)
                formatted_keywords = process_keywords(filtered_keywords)

                st.markdown('### Generated Keywords:\n')
                st.markdown(formatted_keywords)

                formatted_keywords = remove_ast(formatted_keywords)

                idea_numbers = st.slider("How many ideas you want?", 1, 10, 5)

                ideas = ideas_generator(topic, formatted_keywords, llm, tone_of_voice, idea_numbers)
                filtered_ideas = filter_ideas(ideas)

                index = 1

                st.markdown("### Generated Ideas:\n")
                for idea in filtered_ideas:

                    st.markdown(f"{index} - {idea}")
                    index += 1

                st.text('\n')
                num_idea = st.text_input("Choose the idea you want by number")

                if num_idea:

                    num_idea = int(num_idea)
                    idea = filtered_ideas[num_idea - 1]
                    idea = remove_ast(idea)

                    outline = outlines_generator(idea, formatted_keywords, llm)
                    st.text('\n')
                    st.markdown("### Generated Outline:\n")
                    st.markdown(outline)
                    outline_list = filtered_outlines(outline)

                    if st.button('Generate'):

                        article = full_article(idea, outline_list, tone_of_voice, llm)

                        st.markdown("# Your Article:\n")
                        st.markdown(f"{idea}\n")
                        st.markdown('\n\n'.join(article))
                        st.text('\n\n')
                        num_art_chars = len('\n'.join(article))
                        num_art_words = len('\n'.join(article).split(' '))

                        st.markdown(f"{num_art_chars} Characters")
                        st.markdown(f"{num_art_words} Words")

        elif blog_choices == blog_choices_list[1]:

            topic = st.text_input('Topic')

            if topic:

                if st.button('Generate'):
                    blog_post = blog_content.blog_post_gen(topic)
                    st.markdown(blog_post)

        elif blog_choices == blog_choices_list[2]:

            topic = st.text_input('Topic')

            if topic:

                if st.button('Generate'):

                    blog_idea = blog_content.blog_idea_gen(topic)
                    st.markdown(blog_idea)

        elif blog_choices == blog_choices_list[4]:

            blog = st.text_area('Blog')

            if blog:

                if st.button('Generate'):

                    blog_idea_desc = blog_content.blog_idea_desc_gen(blog)
                    st.markdown(blog_idea_desc)

        elif blog_choices == blog_choices_list[3]:

            blog = st.text_area('Blog')

            if blog:

                if st.button('Generate'):

                    blog_tags = blog_content.blog_tags_gen(blog)
                    st.markdown(blog_tags)

    elif choice == 'Meta Content':

        meta_content_gen = meta_content_creation.meta_content_generation(llm)

        meta_choices_list = ('Facebook Post', 'Facebook Ads', 'Facebook Campaign', 'Insta Image Captioning')

        # Fixed copy-pasted label: this selectbox picks a Meta template, not a blog one.
        meta_choices = st.selectbox('Choose Meta Template', meta_choices_list)

        if meta_choices == meta_choices_list[0]:

            topic = st.text_input("Topic")

            tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)

            if topic and tone_of_voice:

                if st.button('Generate'):

                    facebook_post = meta_content_gen.facebook_post_gen(tone_of_voice, topic)
                    st.markdown(facebook_post)

        elif meta_choices == meta_choices_list[1]:

            product_name = st.text_input('Product Name')
            product_desc = st.text_area('Description')
            targeted_audience = st.text_input('Target Audience')
            plans_proms = st.text_input('Plan and Promotions')

            tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)
            creativity = st.selectbox('Creativity', creativity_list)

            if product_name and product_desc and tone_of_voice:

                if st.button('Generate'):

                    face_ad__prom = meta_content_gen.facebook_ads_gen(product_name, product_desc, tone_of_voice, targeted_audience, plans_proms)
                    st.markdown(face_ad__prom)

        elif meta_choices == meta_choices_list[2]:

            product_name = st.text_input('Product Name')
            product_desc = st.text_area('Product Description')
            days = st.selectbox('Days', (10, 15, 20, 25, 30))
            goal = st.text_input('Goal')

            if product_name and product_desc and goal and days:

                if st.button('Generate'):

                    facbook_camp = meta_content_gen.facbook_camp_gen(product_name, product_desc, days, goal)
                    st.markdown(facbook_camp)

        elif meta_choices == meta_choices_list[3]:

            uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

            if uploaded_file is not None:

                # Persist the upload to disk so the captioning pipeline can read it.
                bytes_data = uploaded_file.getvalue()
                with open(uploaded_file.name, "wb") as file:
                    file.write(bytes_data)

                st.image(uploaded_file, caption="Uploaded image",
                         use_column_width=True, width=500)

                scenario = meta_content_gen.img2text(uploaded_file.name)

                st.subheader("Image Scenario:")
                with st.expander("scenario"):
                    st.write(scenario)

                tone_of_voice = st.selectbox('Tone of Voice', tone_of_voice_list)
                form = st.selectbox('Caption Form', ('Short Form', 'Medium Form', 'Long Form'))

                if st.button('Generate'):

                    st.subheader("Generated Instagram Image according to the scenario:")
                    instaCap = meta_content_gen.generate_InstaCap(scenario, tone_of_voice, form)
                    st.markdown(instaCap)

    elif choice == 'Emails':

        email_content_gen = email_writer.email_writing(llm)

        email_choices_list = ('Email Writer', 'Email Marketing Campaign')

        # Fixed label typo: "Emai" -> "Email".
        email_choices = st.selectbox('Choose Email Template', email_choices_list)

        if email_choices == email_choices_list[0]:

            recipient = st.text_input('Recipient')
            recipient_position = st.text_input('Recipient Position')
            sender_name = st.text_input('Sender Name')
            position_sender = st.text_input('Sender Position')
            description = st.text_area('Description')

            if recipient and recipient_position and sender_name and position_sender and description:

                if st.button('Generate'):

                    email = email_content_gen.email_gen(recipient, recipient_position, sender_name, position_sender, description)
                    email_subject = email_content_gen.email_subject_gen(email)
                    st.markdown(f'Subject: {email_subject}')
                    st.markdown(email)

        elif email_choices == email_choices_list[1]:

            product_name = st.text_input("Product Name")
            product_description = st.text_area("Product Description")
            target_audience = st.text_input("Target Audience")
            goal = st.text_input("Goal")

            if product_name and product_description and target_audience and goal:

                if st.button("Generate"):

                    email_marketing_camp = email_content_gen.email_marketing_campaigns_gen(product_name, product_description, target_audience, goal)
                    st.markdown(email_marketing_camp)
meta_content_creation.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.prompts import PromptTemplate
|
| 2 |
+
from langchain.chains import LLMChain
|
| 3 |
+
from transformers import pipeline
|
| 4 |
+
|
| 5 |
+
class meta_content_generation:
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def __init__(self, llm):
|
| 9 |
+
self.llm = llm
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def facebook_ads_gen(self, product_name, product_description, tone_of_voice, targeted_audience="", plans_promotions=""):
|
| 13 |
+
|
| 14 |
+
if targeted_audience != "" and plans_promotions != "":
|
| 15 |
+
facebook_ads_prompt = f"Generate a Facebook ad for {product_name} Product. {product_name} is {product_description}. Our Target Audience is {targeted_audience}. Our plans and promotions is {plans_promotions}. Tone of the ad should be {tone_of_voice}"
|
| 16 |
+
|
| 17 |
+
elif targeted_audience == "" and plans_promotions != "":
|
| 18 |
+
facebook_ads_prompt = f"Generate a Facebook ad for {product_name} Product. {product_name} is {product_description}. Our plans and promotions is {plans_promotions}. Tone of the ad should be {tone_of_voice}"
|
| 19 |
+
|
| 20 |
+
elif targeted_audience != "" and plans_promotions == "":
|
| 21 |
+
facebook_ads_prompt = f"Generate a Facebook ad for {product_name} Product. {product_name} is {product_description}. Our Target Audience is {targeted_audience}. Tone of the ad should be{tone_of_voice}."
|
| 22 |
+
|
| 23 |
+
else:
|
| 24 |
+
facebook_ads_prompt = f"Generate a Facebook ad for {product_name} Product. {product_name} is {product_description}. Tone of the ad Should be {tone_of_voice}."
|
| 25 |
+
|
| 26 |
+
facebook_ads_promptTemp = PromptTemplate(
|
| 27 |
+
input_variables=["text_input"],
|
| 28 |
+
template="You are a Professional Facebook Ad Copywriter:\n{text_input}\nFacebook Ad:")
|
| 29 |
+
|
| 30 |
+
facebook_ad_extraction_chain = LLMChain(llm=self.llm, prompt=facebook_ads_promptTemp)
|
| 31 |
+
facebook_ad = facebook_ad_extraction_chain.run(facebook_ads_prompt)
|
| 32 |
+
|
| 33 |
+
return facebook_ad
|
| 34 |
+
|
| 35 |
+
def facbook_camp_gen(self, product_name, product_desc, days, goal):
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
facebook_ads_prompt = f"Generate a {days} days Facebook campaign (no budget included) calendar for our {product_name}. {product_name} is {product_desc}. with the goal to {goal}."
|
| 39 |
+
print(facebook_ads_prompt)
|
| 40 |
+
facebook_ads_promptTemp = PromptTemplate(
|
| 41 |
+
input_variables=["text_input"],
|
| 42 |
+
template="""You are a Professional Facebook Digital Marketer:\n{text_input}\nGenerate only the Facebook campaign Calender without any details and don't mention any budgets:\nExample to emulate it:\nWeek 1: Getting Started and Teasers
|
| 43 |
+
|
| 44 |
+
Day 1-2: Introduction to FAQGenius, share its features and benefits.
|
| 45 |
+
Day 3-4: Teaser posts about how FAQGenius can save time and improve customer satisfaction.
|
| 46 |
+
Day 5-7: User testimonials and success stories with FAQGenius." and so on..
|
| 47 |
+
""")
|
| 48 |
+
facebook_ad_extraction_chain = LLMChain(llm=self.llm, prompt=facebook_ads_promptTemp)
|
| 49 |
+
facebook_ad = facebook_ad_extraction_chain.run(facebook_ads_prompt)
|
| 50 |
+
|
| 51 |
+
return facebook_ad
|
| 52 |
+
|
| 53 |
+
def facebook_post_gen(self, tone_of_voice, topic):
|
| 54 |
+
|
| 55 |
+
productDesc_prompt = f"Write an attractive facebook post on {topic}. Tone should be {tone_of_voice}. Post dosen't include any photos or videos."
|
| 56 |
+
|
| 57 |
+
productDesc_promptTemp = PromptTemplate(
|
| 58 |
+
input_variables=["text_input"],
|
| 59 |
+
template="You are a professional facebook content creator:\n{text_input}\n\nFacebook Post:")
|
| 60 |
+
|
| 61 |
+
productDesc_extraction_chain = LLMChain(llm=self.llm, prompt=productDesc_promptTemp)
|
| 62 |
+
product_desc = productDesc_extraction_chain.run(productDesc_prompt)
|
| 63 |
+
|
| 64 |
+
return product_desc
|
| 65 |
+
|
| 66 |
+
def img2text(self, url):
    """Caption an image with the Salesforce BLIP image-captioning model.

    Parameters:
        url (str): Path or URL of the image to caption.

    Returns:
        str: The generated caption text.
    """
    # NOTE: the pipeline (and model weights) is constructed on every call;
    # behavior preserved from the original implementation.
    captioner = pipeline("image-to-text", model='Salesforce/blip-image-captioning-base')
    predictions = captioner(url)
    return predictions[0]['generated_text']
def generate_InstaCap(self, scenario, tone_of_voice, form):
    """Generate an Instagram caption for a described image.

    Parameters:
        scenario (str): Textual description of the Instagram image
            (e.g. the output of ``img2text``).
        tone_of_voice (str): Desired tone for the caption.
        form (str): Desired caption form/length (e.g. "short", "long").

    Returns:
        str: The generated Instagram caption.
    """
    instaCap_prompt = f"Craft a {form} Caption on my Instagram Image Here is the description of my Instagram Image: {scenario}.\nThe tone should be {tone_of_voice}"

    # Fixed typo in the system prompt ("You are infulencer" ->
    # "You are an influencer") so the LLM receives a clean instruction.
    instaCap_promptTemp = PromptTemplate(
        input_variables=["text_input"],
        template="You are an influencer:\n{text_input}\nInstagram Caption:")

    instaCap_extraction_chain = LLMChain(llm=self.llm, prompt=instaCap_promptTemp)
    instaCap = instaCap_extraction_chain.run(instaCap_prompt)

    return instaCap
outline_generation.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.prompts import PromptTemplate
|
| 2 |
+
from langchain.chains import LLMChain
|
| 3 |
+
import re
|
| 4 |
+
from remove_astricks import remove_ast
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def outlines_generator(idea, keywords, llm):
    """Generate a numbered table of contents for an article.

    Builds a request from the article idea and keywords, then asks the
    supplied language model to produce a table of contents with at least
    ten main points, numbered with roman numerals.

    Parameters:
        idea (str): The main idea or topic of the article.
        keywords (str): Keywords or topic-related terms to weave into the
            outline's main points.
        llm (langchain.llms object): Pre-trained language model instance
            (e.g. Google Palm or llama2) used to generate the outline.

    Returns:
        str: The generated outline, including section headings.
    """
    toc_request = f"Generate an table of contents with at least 10 main points for an article on {idea}. Include key points related to {keywords}.\nBe creative and innovation in each main topic"

    toc_template = PromptTemplate(
        input_variables=["text_input"],
        template="You are a content creator\n{text_input}\n\nTable of Content (number each main point with roman numerals):")

    toc_chain = LLMChain(llm=llm, prompt=toc_template)
    return toc_chain.run(toc_request)
def filtered_outlines(outline):
    """Split a roman-numeral outline into cleaned, renumbered sections.

    The input is split on roman-numeral headers ("I. ", "II. ", ...); the
    text before the first header is discarded. Each section is renumbered
    with arabic numerals and passed through ``remove_ast`` to strip
    asterisks.

    Parameters:
        outline (str): Outline text with roman-numeral section headers.

    Returns:
        list[str]: Cleaned, renumbered outline sections.
    """
    raw_sections = re.split(r'\b[IVXLCDM]+\.\s', outline)[1:]
    numbered = [f"{i}. {section}" for i, section in enumerate(raw_sections, start=1)]
    return [remove_ast(section) for section in numbered]
product_description_generation.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.prompts import PromptTemplate
|
| 2 |
+
from langchain.chains import LLMChain
|
| 3 |
+
|
| 4 |
+
def product_description_gen(product_name, product_desc, tone_of_voice, llm):
    """Generate an Amazon product description in a requested tone.

    Uses LangChain to prompt the supplied language model for a product
    description tailored to the given product name, short description,
    and tone of voice.

    Parameters:
        product_name (str): Name of the product being described.
        product_desc (str): A brief description of the product.
        tone_of_voice (str): Intended tone of the description
            (e.g. professional, friendly, persuasive).
        llm: Language model instance used for generation.

    Returns:
        str: The generated Amazon product description.
    """
    description_request = f"Write an engagging and {tone_of_voice} Amazon product description of {product_name} Product here is a short description of my product:\n\n{product_desc}\n"

    description_template = PromptTemplate(
        input_variables=["text_input"],
        template="You are a content creator and product description writer who helps clients to write their product description on amazon:\n{text_input}\nAmazon Product Description:")

    description_chain = LLMChain(llm=llm, prompt=description_template)
    return description_chain.run(description_request)