Commit 46a21b6
Parent(s): 733cd20

Think Ai main code

Files changed:
- .gitignore +48 -0
- app.py +168 -0
- requirements.txt +0 -0
.gitignore
ADDED
@@ -0,0 +1,48 @@
+# Environment files
+.env
+
+# Virtual environments
+venv/
+env/
+
+# Compiled files
+__pycache__/
+*.pyc
+*.pyo
+
+# Logs
+*.log
+
+# OS-specific
+.DS_Store
+Thumbs.db
+desktop.ini
+
+# IDE/Editor-specific
+.vscode/
+.idea/
+*.iml
+
+# Build and distribution
+/build/
+/dist/
+/*.egg-info
+
+# Temporary files
+*.bak
+*.swp
+*~
+
+# Dependency files
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Test coverage
+.coverage
+coverage/
+*.cover
+*.coverage.*
+
+# Custom project files
+secrets.json
+config.local.json
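The ignored .env file is where app.py (below) reads its API key and prompt text from. A minimal sketch of that file, assuming only the variable names app.py actually reads; the values are placeholders, and all 25 pmpt entries need to be set, since app.py builds a prompt template from each one:

    # .env -- kept out of version control by the rule above
    GROQ_API_KEY=your-groq-api-key
    pmpt1=System prompt for Analytical Thinking (placeholder)
    pmpt2=System prompt for Creative Thinking (placeholder)
    # ... pmpt3 through pmpt24, one per thinking style ...
    pmpt25=System prompt for Design Thinking (placeholder)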
app.py
ADDED
@@ -0,0 +1,168 @@
+import os
+from dotenv import load_dotenv
+import time
+import streamlit as st
+from langchain_groq import ChatGroq
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+
+load_dotenv()
+groq_api_key = os.getenv("GROQ_API_KEY")
+
+p1 = os.getenv("pmpt1")
+p2 = os.getenv("pmpt2")
+p3 = os.getenv("pmpt3")
+p4 = os.getenv("pmpt4")
+p5 = os.getenv("pmpt5")
+p6 = os.getenv("pmpt6")
+p7 = os.getenv("pmpt7")
+p8 = os.getenv("pmpt8")
+p9 = os.getenv("pmpt9")
+p10 = os.getenv("pmpt10")
+p11 = os.getenv("pmpt11")
+p12 = os.getenv("pmpt12")
+p13 = os.getenv("pmpt13")
+p14 = os.getenv("pmpt14")
+p15 = os.getenv("pmpt15")
+p16 = os.getenv("pmpt16")
+p17 = os.getenv("pmpt17")
+p18 = os.getenv("pmpt18")
+p19 = os.getenv("pmpt19")
+p20 = os.getenv("pmpt20")
+p21 = os.getenv("pmpt21")
+p22 = os.getenv("pmpt22")
+p23 = os.getenv("pmpt23")
+p24 = os.getenv("pmpt24")
+p25 = os.getenv("pmpt25")
+
+prompt1 = ChatPromptTemplate.from_messages([("system", p1), ("user", "Question:{query1}")])
+prompt2 = ChatPromptTemplate.from_messages([("system", p2), ("user", "Question:{query1}")])
+prompt3 = ChatPromptTemplate.from_messages([("system", p3), ("user", "Question:{query1}")])
+prompt4 = ChatPromptTemplate.from_messages([("system", p4), ("user", "Question:{query1}")])
+prompt5 = ChatPromptTemplate.from_messages([("system", p5), ("user", "Question:{query1}")])
+prompt6 = ChatPromptTemplate.from_messages([("system", p6), ("user", "Question:{query1}")])
+prompt7 = ChatPromptTemplate.from_messages([("system", p7), ("user", "Question:{query1}")])
+prompt8 = ChatPromptTemplate.from_messages([("system", p8), ("user", "Question:{query1}")])
+prompt9 = ChatPromptTemplate.from_messages([("system", p9), ("user", "Question:{query1}")])
+prompt10 = ChatPromptTemplate.from_messages([("system", p10), ("user", "Question:{query1}")])
+prompt11 = ChatPromptTemplate.from_messages([("system", p11), ("user", "Question:{query1}")])
+prompt12 = ChatPromptTemplate.from_messages([("system", p12), ("user", "Question:{query1}")])
+prompt13 = ChatPromptTemplate.from_messages([("system", p13), ("user", "Question:{query1}")])
+prompt14 = ChatPromptTemplate.from_messages([("system", p14), ("user", "Question:{query1}")])
+prompt15 = ChatPromptTemplate.from_messages([("system", p15), ("user", "Question:{query1}")])
+prompt16 = ChatPromptTemplate.from_messages([("system", p16), ("user", "Question:{query1}")])
+prompt17 = ChatPromptTemplate.from_messages([("system", p17), ("user", "Question:{query1}")])
+prompt18 = ChatPromptTemplate.from_messages([("system", p18), ("user", "Question:{query1}")])
+prompt19 = ChatPromptTemplate.from_messages([("system", p19), ("user", "Question:{query1}")])
+prompt20 = ChatPromptTemplate.from_messages([("system", p20), ("user", "Question:{query1}")])
+prompt21 = ChatPromptTemplate.from_messages([("system", p21), ("user", "Question:{query1}")])
+prompt22 = ChatPromptTemplate.from_messages([("system", p22), ("user", "Question:{query1}")])
+prompt23 = ChatPromptTemplate.from_messages([("system", p23), ("user", "Question:{query1}")])
+prompt24 = ChatPromptTemplate.from_messages([("system", p24), ("user", "Question:{query1}")])
+prompt25 = ChatPromptTemplate.from_messages([("system", p25), ("user", "Question:{query1}")])
+
+
+
+llm1 = ChatGroq(model_name="llama3-70b-8192", groq_api_key=groq_api_key)
+output_parser = StrOutputParser()
+
+chain1 = prompt1 | llm1 | output_parser
+chain2 = prompt2 | llm1 | output_parser
+chain3 = prompt3 | llm1 | output_parser
+chain4 = prompt4 | llm1 | output_parser
+chain5 = prompt5 | llm1 | output_parser
+chain6 = prompt6 | llm1 | output_parser
+chain7 = prompt7 | llm1 | output_parser
+chain8 = prompt8 | llm1 | output_parser
+chain9 = prompt9 | llm1 | output_parser
+chain10 = prompt10 | llm1 | output_parser
+chain11 = prompt11 | llm1 | output_parser
+chain12 = prompt12 | llm1 | output_parser
+chain13 = prompt13 | llm1 | output_parser
+chain14 = prompt14 | llm1 | output_parser
+chain15 = prompt15 | llm1 | output_parser
+chain16 = prompt16 | llm1 | output_parser
+chain17 = prompt17 | llm1 | output_parser
+chain18 = prompt18 | llm1 | output_parser
+chain19 = prompt19 | llm1 | output_parser
+chain20 = prompt20 | llm1 | output_parser
+chain21 = prompt21 | llm1 | output_parser
+chain22 = prompt22 | llm1 | output_parser
+chain23 = prompt23 | llm1 | output_parser
+chain24 = prompt24 | llm1 | output_parser
+chain25 = prompt25 | llm1 | output_parser
+
+
+
+def generate_ai_content(thinking_type, usr_ip):
+    if thinking_type == "Analytical Thinking": return chain1.invoke({"query1": usr_ip})
+    elif thinking_type == "Creative Thinking": return chain2.invoke({"query1": usr_ip})
+    elif thinking_type == "Critical Thinking": return chain3.invoke({"query1": usr_ip})
+    elif thinking_type == "Logical Thinking": return chain4.invoke({"query1": usr_ip})
+    elif thinking_type == "Lateral Thinking": return chain5.invoke({"query1": usr_ip})
+    elif thinking_type == "Divergent Thinking": return chain6.invoke({"query1": usr_ip})
+    elif thinking_type == "Convergent Thinking": return chain7.invoke({"query1": usr_ip})
+    elif thinking_type == "Empathetic Thinking": return chain8.invoke({"query1": usr_ip})
+    elif thinking_type == "Systems Thinking": return chain9.invoke({"query1": usr_ip})
+    elif thinking_type == "Intuitive Thinking": return chain10.invoke({"query1": usr_ip})
+    elif thinking_type == "Strategic Thinking": return chain11.invoke({"query1": usr_ip})
+    elif thinking_type == "Collaborative Thinking": return chain12.invoke({"query1": usr_ip})
+    elif thinking_type == "Reverse Thinking": return chain13.invoke({"query1": usr_ip})
+    elif thinking_type == "Practical Thinking": return chain14.invoke({"query1": usr_ip})
+    elif thinking_type == "Mind Mapping": return chain15.invoke({"query1": usr_ip})
+    elif thinking_type == "Trial-and-Error Thinking": return chain16.invoke({"query1": usr_ip})
+    elif thinking_type == "Root Cause Analysis": return chain17.invoke({"query1": usr_ip})
+    elif thinking_type == "Optimistic Thinking": return chain18.invoke({"query1": usr_ip})
+    elif thinking_type == "Pessimistic Thinking": return chain19.invoke({"query1": usr_ip})
+    elif thinking_type == "Abstract Thinking": return chain20.invoke({"query1": usr_ip})
+    elif thinking_type == "Habitual Thinking": return chain21.invoke({"query1": usr_ip})
+    elif thinking_type == "Scenario Thinking": return chain22.invoke({"query1": usr_ip})
+    elif thinking_type == "Mathematical Thinking": return chain23.invoke({"query1": usr_ip})
+    elif thinking_type == "Ethical Thinking": return chain24.invoke({"query1": usr_ip})
+    elif thinking_type == "Design Thinking": return chain25.invoke({"query1": usr_ip})
+
+
+st.title("Think AI")
+st.text("Think AI is designed to explore all the possible ways to approach a problem to find the perfect solution.")
+st.write("### Ask anything")
+col1, col2 = st.columns([4, 1])
+with col1:
+    user_input = st.text_area("Enter your text:", key="input_text", height=68)
+with col2:
+    submit = st.button("Submit")
+
+
+if submit and user_input.strip():
+    counter = 0
+    st.write("---")
+    st.write("### Generated Content")
+    headlines = ["Analytical Thinking", "Creative Thinking", "Critical Thinking", "Logical Thinking",
+                 "Lateral Thinking", "Divergent Thinking", "Convergent Thinking", "Empathetic Thinking",
+                 "Systems Thinking", "Intuitive Thinking", "Strategic Thinking", "Collaborative Thinking",
+                 "Reverse Thinking", "Practical Thinking", "Mind Mapping", "Trial-and-Error Thinking",
+                 "Root Cause Analysis", "Optimistic Thinking", "Pessimistic Thinking", "Abstract Thinking",
+                 "Habitual Thinking", "Scenario Thinking", "Mathematical Thinking", "Ethical Thinking",
+                 "Design Thinking",
+                 ]
+    for headline in headlines:
+
+        if counter >= 5:
+            time.sleep(3)
+            counter = 0
+
+        st.write(f"#### {headline}")
+
+        ai_content = generate_ai_content(headline, user_input)
+        st.markdown(ai_content, unsafe_allow_html=True)
+        # st.text_area(f" ", value=ai, key=headline)
+        counter += 1
+
+        st.markdown(
+            """
+            <hr style="border: none; border-top: 3px double #000; margin-top: 20px; margin-bottom: 20px;">
+            """,
+            unsafe_allow_html=True
+        )
+
+# End the box container
+st.markdown('</div>', unsafe_allow_html=True)
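The 25 prompt, chain, and elif blocks in app.py all follow one pattern. As a hedged sketch of an equivalent wiring (not part of this commit), the same chains could be built in a loop keyed by thinking style; the style names, the pmpt1..pmpt25 variables, and the model call are taken from app.py, everything else is illustrative:

    import os
    from dotenv import load_dotenv
    from langchain_groq import ChatGroq
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.output_parsers import StrOutputParser

    load_dotenv()

    # Same 25 styles, in the same order as the pmpt1..pmpt25 variables in app.py.
    THINKING_STYLES = [
        "Analytical Thinking", "Creative Thinking", "Critical Thinking", "Logical Thinking",
        "Lateral Thinking", "Divergent Thinking", "Convergent Thinking", "Empathetic Thinking",
        "Systems Thinking", "Intuitive Thinking", "Strategic Thinking", "Collaborative Thinking",
        "Reverse Thinking", "Practical Thinking", "Mind Mapping", "Trial-and-Error Thinking",
        "Root Cause Analysis", "Optimistic Thinking", "Pessimistic Thinking", "Abstract Thinking",
        "Habitual Thinking", "Scenario Thinking", "Mathematical Thinking", "Ethical Thinking",
        "Design Thinking",
    ]

    llm = ChatGroq(model_name="llama3-70b-8192", groq_api_key=os.getenv("GROQ_API_KEY"))
    parser = StrOutputParser()

    # One chain per style: system prompt from pmpt<i>, user question passed as {query1}.
    chains = {}
    for i, style in enumerate(THINKING_STYLES, start=1):
        system_prompt = os.getenv(f"pmpt{i}")
        prompt = ChatPromptTemplate.from_messages(
            [("system", system_prompt), ("user", "Question:{query1}")]
        )
        chains[style] = prompt | llm | parser

    def generate_ai_content(thinking_type, usr_ip):
        # Dictionary lookup replaces the 25-branch if/elif ladder.
        return chains[thinking_type].invoke({"query1": usr_ip})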
requirements.txt
ADDED
Binary file (17.9 kB). View file
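The pinned requirements.txt is not rendered in this view. Judging only from the imports in app.py, the direct dependencies would include at least the following (a sketch inferred from the code, not the actual 17.9 kB file):

    streamlit
    python-dotenv
    langchain-core
    langchain-groq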