isayahc committed on
Commit
c605fcf
1 Parent(s): cd31e3b

removed redundant file

Files changed (1)
  1. langhchain_generate_components.py +0 -152
langhchain_generate_components.py DELETED
@@ -1,152 +0,0 @@
- """
- #TODO: make an agent that uses a HUMAN as a tool to get:
- - the purpose of the science experiment
- - what fields of study the user already knows of
-
- #IDEA: the platform could generate more in-depth experiments by generating a data set and generating / collecting scientific data
-
- ### Chatbot
- the chatbot helps the BOUNTY_BOARD_CHAIN generate science experiments
-
- ### EXPERIMENT and provide feedback on experiments
-
- ### Integration
-
- - I need to integrate this code into the app. This includes creating an id for each post, and potentially a comment section for each "Experiment"
- - In addition I need to build a Pinecone retriever to generate scientific experiments from the "community vector search"
- - potentially have premium users store their private data, but I may not implement this during the hackathon
- """
-
- # https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured
- from langchain.output_parsers import ResponseSchema, StructuredOutputParser
- from langchain.prompts import PromptTemplate
- from langchain_openai import ChatOpenAI
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
- from langchain.memory import ConversationBufferMemory
- from langchain_core.runnables import RunnablePassthrough
- from langchain_core.output_parsers import StrOutputParser
- from langchain.retrievers import ArxivRetriever, PubMedRetriever, WikipediaRetriever
- from operator import itemgetter
- from dotenv import load_dotenv
-
- # load OPENAI_API_KEY from a local .env file so ChatOpenAI can pick it up
- load_dotenv()
-
-
- # The schema for creating experiments
- # experiment_schema = [
- #     ResponseSchema(name="Material", description="list of materials needed to perform the experiments, please be specific", type="list"),
- # ]
-
-
- response_schemas = [
-     ResponseSchema(name="Material", description="The base components needed to create this item from scratch (DIY). This list must be exact and not an estimation", type="list"),
-     ResponseSchema(name="Field Of Study", description="List the fields of study this can be used for", type="list"),
- ]
-
- output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
- memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
- format_instructions = output_parser.get_format_instructions()
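-
- # Illustrative only: with the schemas above, format_instructions tells the model to reply with a
- # fenced JSON block, and output_parser turns that reply into a dict shaped roughly like
- # {"Material": ["glass lens", "cardboard tube", ...], "Field Of Study": ["optics", "biology"]}
- # (the actual values depend on the model's response).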
-
-
- # experiment_output_parser = StructuredOutputParser.from_response_schemas(experiment_schema)
- # maker_output_parser = StructuredOutputParser.from_response_schemas(maker_schema)
-
- # format_instructions = experiment_output_parser.get_format_instructions()
- # maker_format_instructions = maker_output_parser.get_format_instructions()
-
- # output_parser = StructuredOutputParser.from_response_schemas(maker_schema)
-
- # experiment_prompt = PromptTemplate(
- #     template="You must generate well detailed science experiments.\n{format_instructions}\n{question}\n{context}",
- #     input_variables=["question", "context"],
- #     partial_variables={"format_instructions": format_instructions},
- # )
-
- maker_prompt = PromptTemplate(
-     template="You must generate a well detailed list of items for creating a given item from scratch. \
- Also describe the purpose, for a text-to-3D model to use as extra context\n{format_instructions}\n{question}\n{context}",
-     input_variables=["question", "context"],
-     partial_variables={"format_instructions": format_instructions},
- )
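- # The maker_prompt expects two runtime variables, "context" (retrieved documents) and "question"
- # (the item to build from scratch); "format_instructions" is already filled in via partial_variables.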
-
-
- def join_strings(*args: str) -> str:
-     """
-     Join an arbitrary number of strings into one string.
-
-     Args:
-         *args: Variable number of strings to join.
-
-     Returns:
-         str: Joined string.
-     """
-     return ''.join(args)
-
- def format_docs(docs):
-     """Flatten retrieved documents into one context string (uses Arxiv-style metadata keys)."""
-     return "\n\n".join([join_strings(d.page_content, d.metadata.get('Entry ID', ''), d.metadata.get('Title', '')) for d in docs])
-
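- # Note: format_docs is not wired into the active chain below; the retriever's Document list is
- # interpolated into the prompt's {context} slot via its default string representation.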
-
-
- # model = ChatOpenAI(temperature=0)
- model = ChatOpenAI(temperature=0, model="gpt-4")
-
-
- arxiv_retriever = ArxivRetriever(load_max_docs=2)
-
- pub_med_retriever = PubMedRetriever()
-
- wikipedia_retriever = WikipediaRetriever()
-
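- # Each retriever maps a text query to a list of Documents (Arxiv, PubMed, and Wikipedia
- # respectively); only wikipedia_retriever is used by the active chain below.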
- # arxiv_chain = (
- #     {"context": arxiv_retriever, "question": RunnablePassthrough()}
- #     | experiment_prompt
- #     | model
- #     | experiment_output_parser
- # )
-
- # pub_med_chain = (
- #     {"context": pub_med_retriever, "question": RunnablePassthrough()}
- #     | experiment_prompt
- #     | model
- #     | experiment_output_parser
- # )
-
- # wikipedia_chain = (
- #     {"context": wikipedia_retriever, "question": RunnablePassthrough()}
- #     | experiment_prompt
- #     | model
- #     | experiment_output_parser
- # )
-
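- # LCEL pipeline: the input query is sent to wikipedia_retriever (bound to "context") and passed
- # through unchanged as "question", formatted by maker_prompt, sent to the model, and parsed back
- # into a dict by output_parser.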
- maker_wikipedia_chain = (
-     {"context": wikipedia_retriever, "question": RunnablePassthrough()}
-     | maker_prompt
-     | model
-     | output_parser
- )
-
-
- if __name__ == "__main__":
-
-
-     # query = "how to create electronics on a cellulose substrate"
-     query = "A Microscope"
-
-     # output = wikipedia_chain.invoke(query)
-     output = maker_wikipedia_chain.invoke(query)
-     print(output)
-