add: streamlit app

Files changed:
- .gitignore +4 -0
- .streamlit/config.toml +6 -0
- README.md +10 -0
- app.py +36 -0
- llm.py +2 -2
- prompts.py +16 -35
- self_discover.py +26 -8
.gitignore
ADDED
@@ -0,0 +1,4 @@
+venv/
+.env
+__pycache__
+prompt_log.txt
.streamlit/config.toml
ADDED
@@ -0,0 +1,6 @@
+[theme]
+primaryColor="#F63366"
+backgroundColor="#FFFFFF"
+secondaryBackgroundColor="#F0F2F6"
+textColor="#262730"
+font="sans serif"
README.md
CHANGED
@@ -1,3 +1,13 @@
+---
+title: SELF-DISCOVER
+emoji: 🔍
+colorFrom: gray
+colorTo: pink
+sdk: streamlit
+pinned: false
+license: apache-2.0
+---
+
 ## SELF-DISCOVER FRAMEWORK
 
 ## Paper Overview [link](https://arxiv.org/pdf/2402.03620.pdf)
app.py
ADDED
@@ -0,0 +1,36 @@
+import streamlit as st
+import os
+from self_discover import SelfDiscover
+
+
+st.set_page_config(
+    page_title="Reasoning Structure Generator",
+    page_icon="🔍",
+    layout="wide",
+    initial_sidebar_state="expanded"
+)
+st.title("Reasoning Structure Generator")
+
+
+api_key = st.text_input("Enter OpenAI api key ")
+task = st.text_area("Enter the task example you want to generate a reasoning structure for ")
+
+if st.button("Generate Reasoning Structure"):
+    os.environ["OPENAI_API_KEY"] = api_key
+    result = SelfDiscover(task)
+    result()
+    tab1, tab2, tab3 = st.tabs(["SELECTED_MODULES", "ADAPTED_MODULES", "REASONING_STRUCTURE"])
+    with tab1:
+        st.header("SELECTED_MODULES")
+        st.write(result.selected_modules)
+
+    with tab2:
+        st.header("ADAPTED_MODULES")
+        st.write(result.adapted_modules)
+
+    with tab3:
+        st.header("REASONING_STRUCTURE")
+        st.write(result.reasoning_structure)
+else:
+    st.error("Please provide both your API key and a task example.")
+
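One behavioural note on the app above: st.error sits on the else branch of the button check, so the error banner shows on every rerun where the button has not been clicked, including the first page load, and an empty key or task still triggers a generation attempt. A minimal sketch of guarding the inputs inside the button branch instead (same widget names as app.py; st.stop() and the password input type are assumptions about intent, not part of this commit):

    import os
    import streamlit as st
    from self_discover import SelfDiscover

    api_key = st.text_input("Enter OpenAI api key", type="password")
    task = st.text_area("Enter the task example you want to generate a reasoning structure for")

    if st.button("Generate Reasoning Structure"):
        if not api_key or not task:
            # Only complain after the user has actually tried to generate something.
            st.error("Please provide both your API key and a task example.")
            st.stop()
        os.environ["OPENAI_API_KEY"] = api_key
        result = SelfDiscover(task)
        result()  # runs SELECT -> ADAPT -> IMPLEMENT in sequence
        st.write(result.reasoning_structure)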
llm.py
CHANGED
@@ -41,10 +41,10 @@ class LLM:
                 prompt)
             return response.text
 
-        elif self.model_name == '
+        elif self.model_name == 'OpenAI':
             res = self.model.chat.completions.create(
                 model="gpt-3.5-turbo-1106",
-                response_format={"type": "json_object"},
+                # response_format={"type": "json_object"},
                 messages=[
                     # {"role": "system", "content": "You are a helpful assistant."},
                     {"role": "user", "content": f"{prompt}"},
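For reference on the OpenAI branch above (the rest of the class is not shown in this hunk, so how it returns the result is an assumption): with the openai>=1.0 Python client, the generated text of a chat completion lives at choices[0].message.content. A minimal standalone sketch:

    from openai import OpenAI  # assumes llm.py wraps the official openai>=1.0 client

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    res = client.chat.completions.create(
        model="gpt-3.5-turbo-1106",
        messages=[{"role": "user", "content": "Say hello"}],
    )
    print(res.choices[0].message.content)  # the generated text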
prompts.py
CHANGED
@@ -54,52 +54,33 @@ objectives?
 
 
 select_prompt = """
-
-
-{resonining_modules}
+In order to solve the given task:
+<Task>
 {Task}
+</Task>
 Select several modules that are crucial for solving the tasks above
+from all the reasoning module description given below:
+{resonining_modules}
 """
 
 adapt_prompt = """
 Rephrase and specify each reasoning module so that it better helps solving the task:
+<Task>
+{Task}
+</Task>
 SELECTED module descriptions:
 {selected_modules}
-
-Adapt each reasoning module description to better solve the tasks:
+Adapt each reasoning module description to better solve the task:
 """
 
 implement_prompt = """
-Operationalize the reasoning modules into a step-by-step reasoning plan in JSON format
-
-
-
-
-45.58,47.78 M 45.58,47.78 L 53.25,36.07 L 66.29,48.90 L 78.69,61.09 L 55.57,80.69"/> draws a:
-(A) circle (B) heptagon (C) hexagon (D) kite (E) line (F) octagon (G) pentagon(H) rectangle (I) sector (J) triangle
-{ "Simplify SVG Path": ...
-"Breakdown of Path Commands": {
-"Move to Command (M)": "Sets the starting point for the next
-command without drawing anything.",
-"Line to Command (L) steps":
-{"Start and end coordinates of each line segment":
-"M 55.57,80.69 L 57.38,65.80: From point (55.57, 80.69) to (57.38,
-65.80)"}, … and finally closing the shape at (55.57, 80.69)"}
-"Critical Thinking Analysis": {
-"Logical Reasoning": {
-"Analysis of path continuity": "The path includes
-multiple line segments that connect distinct points. The path ends by
-connecting back to the starting point, indicating a closed shape.",
-"Identification of closed shapes": "The final line
-segment connects the last point back to the first point, which is
-characteristic of a closed shape."},
-…
-"Final Reasoning and Decision": "With 7 distinct points all
-connected in a closed path, the shape formed is a heptagon.",
-"Final Answer": "B) heptagon}
-
+Operationalize the reasoning modules into a step-by-step reasoning plan in JSON format
+Example task:
+<Task>
+{Task}
+</Task>
 ADAPTED module descriptions:
 {adapted_modules}
-
-Implement a reasoning structure
+
+Implement a reasoning structure to generalise similar task to follow step-by-step and arrive at correct answers
 """
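As a usage note (a sketch, not part of this commit): the placeholders in these templates are substituted with plain str.replace rather than str.format, so the misspelled {resonining_modules} token is harmless as long as the same literal string is used on the calling side, as self_discover.py below does. A minimal sketch of the three-stage flow, with `llm` standing in for a callable like LLM(model_name="OpenAI"), `reasoning_modules` for the module list assumed to be defined in prompts.py, and `self_discover_once` a hypothetical helper name:

    from prompts import select_prompt, adapt_prompt, implement_prompt

    def self_discover_once(llm, task: str, reasoning_modules: str) -> str:
        # SELECT: pick the reasoning modules relevant to this task
        prompt = select_prompt.replace("{Task}", task).replace("{resonining_modules}", reasoning_modules)
        selected = llm(prompt)

        # ADAPT: rewrite the selected modules in task-specific terms
        prompt = adapt_prompt.replace("{Task}", task).replace("{selected_modules}", selected)
        adapted = llm(prompt)

        # IMPLEMENT: turn the adapted modules into a step-by-step JSON reasoning structure
        prompt = implement_prompt.replace("{Task}", task).replace("{adapted_modules}", adapted)
        return llm(prompt)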
self_discover.py
CHANGED
@@ -4,12 +4,30 @@ from prompts import(
     adapt_prompt,
     implement_prompt
 )
+
 from llm import LLM
 from task_example import task1
 
+import logging
+
+def setup_logging():
+    logger = logging.getLogger("__name__")
+    logger.setLevel(logging.INFO)
+
+    handler = logging.FileHandler("prompt_log.txt")
+    handler.setLevel(logging.INFO)
+
+    formatter = logging.Formatter('%(levelname)s - %(message)s')
+    handler.setFormatter(formatter)
+
+    logger.addHandler(handler)
+    return logger
+
+logger = setup_logging()
+
 class SelfDiscover:
     def __init__(self, task) -> None:
-        self.llm = LLM(model_name="
+        self.llm = LLM(model_name="OpenAI")
         self.actions = ["SELECT", "ADAPT", "IMPLEMENT"]
         self.task = task
 
@@ -17,28 +35,28 @@ class SelfDiscover:
         for action in self.actions:
             print(action)
             if action == "SELECT":
-                print("yes")
                 prompt = select_prompt.replace("{Task}",self.task)
                 prompt = prompt.replace("{resonining_modules}", reasoning_modules)
-
+                logger.info("SELECT PROMPT :" + prompt)
                 self.selected_modules = self.llm(prompt)
+                print(self.selected_modules)
 
             elif action == "ADAPT":
                 prompt = adapt_prompt.replace("{Task}",self.task)
                 prompt = prompt.replace("{selected_modules}",self.selected_modules)
-
+                logger.info("ADAPT PROMPT :" + prompt)
                 self.adapted_modules = self.llm(prompt)
 
             elif action == "IMPLEMENT":
                 prompt = implement_prompt.replace("{Task}",self.task)
                 prompt = prompt.replace("{adapted_modules}", self.adapted_modules)
-
+                logger.info("IMPLEMENT PROMPT:" + prompt)
                 self.reasoning_structure = self.llm(prompt)
 
 
 if __name__=="__main__":
     result = SelfDiscover(task=task1)
     result()
-
-
-
+    logger.info(f"SELECTED_MODULES : {result.selected_modules}")
+    logger.info(f"ADAPTED_MODULES : {result.adapted_modules}")
+    logger.info(f"REASONING_STRUCTURE : {result.reasoning_structure}")
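A small observation on setup_logging above (unchanged by this note): logging.getLogger("__name__") passes the literal string "__name__", so the logger is named "__name__" rather than after the module. It still writes to prompt_log.txt, but the conventional form passes the bare variable. A minimal sketch of the usual pattern, using the same handler and format:

    import logging

    logger = logging.getLogger(__name__)  # module name, e.g. "self_discover"
    logger.setLevel(logging.INFO)

    handler = logging.FileHandler("prompt_log.txt")
    handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
    logger.addHandler(handler)

    logger.info("SELECT PROMPT : ...")  # written to prompt_log.txt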