Raj3 committed on
Commit
1a30279
·
verified ·
1 Parent(s): 239b917

Upload 3 files

Browse files
Files changed (3) hide show
  1. pages/FAQ.py +59 -0
  2. pages/dashboard.py +127 -0
  3. pages/page1.py +50 -0
pages/FAQ.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

# Global CSS for this page: recolor buttons, darken the sidebar, and hide
# Streamlit's default chrome (main menu, footer, deploy/status widgets,
# fullscreen button). Injected as raw HTML, hence unsafe_allow_html.
PAGE_STYLE = """
<style>
div.stButton > button:first-child {
    background-color: #ffd0d0;
}

div.stButton > button:active {
    background-color: #ff6262;
}

.st-emotion-cache-6qob1r {
    position: relative;
    height: 100%;
    width: 100%;
    background-color: black;
    overflow: overlay;
}

div[data-testid="stStatusWidget"] div button {
    display: none;
}

.reportview-container {
    margin-top: -2em;
}
#MainMenu {visibility: hidden;}
.stDeployButton {display:none;}
footer {visibility: hidden;}
#stDecoration {display:none;}
button[title="View fullscreen"]{
    visibility: hidden;}
</style>
"""

st.markdown(PAGE_STYLE, unsafe_allow_html=True)

# FAQ content as (question, answer) pairs, rendered in order below.
FAQ_ENTRIES = [
    ("What is a language model, and how does it work?",
     "Language models (LM) are a type of artificial intelligence (AI) that delve into the vast world of text data. By analyzing massive amounts of written information, they develop an understanding of how language works. This empowers them to predict the next word in a sequence, translate languages, and even generate creative text formats."),
    ("How can I train my own language model?",
     "Yes, you can train your own language model (LM), but it requires effort and resources.It's a complex process. Consider starting small, exploring resources, and understanding the time commitment involved. There are also pre-trained, accessible LLMs available for exploration before venturing into training your own."),
    ("What are the key considerations when fine-tuning a language model for a specific task?",
     "Fine-tuning a pre-trained language model for a specific task is common practice. Addressing considerations such as dataset selection, hyperparameter tuning, and evaluation metrics can assist users in achieving optimal performance."),
    ("How do I evaluate the performance of my custom language model?",
     "Evaluating the performance of a custom language model involves various metrics and techniques. Explaining metrics such as perplexity, accuracy, and F1 score, as well as methodologies like cross-validation, can aid users in assessing their model's effectiveness."),
    ("What are some common challenges in training custom language models?",
     "Training language models can be challenging due to issues such as overfitting, data scarcity, and computational resources. Identifying these challenges and offering strategies to mitigate them can help users navigate the training process more effectively."),
    ("Can I use transfer learning with my own language model?",
     "Transfer learning, particularly using pre-trained language models as a starting point, is a popular approach in natural language processing. Discussing transfer learning techniques and their applicability to custom language models can provide valuable insights to users."),
    ("How can I optimize the performance of my language model for inference?",
     "Optimizing language model inference for efficiency and speed is essential, especially in real-time applications. Providing tips on model quantization, pruning, and deployment strategies can assist users in optimizing their models for inference."),
    ("What are some ethical considerations when developing and deploying custom language models?",
     "Ethical considerations, such as bias, fairness, and privacy, are increasingly important in language model development. Addressing these concerns and advocating for responsible AI practices can help users build and deploy their models ethically."),
    ("How do I handle out-of-domain or adversarial inputs with my language model?",
     "Language models may encounter inputs that deviate from their training data distribution, leading to performance degradation or vulnerabilities. Offering techniques for handling out-of-domain or adversarial inputs, such as robustness testing and adversarial training, can assist users in improving their model's robustness."),
    ("What resources and tools are available for building and deploying custom language models?",
     "Providing a curated list of resources, including libraries, frameworks, datasets, and tutorials, can empower users to explore and leverage the latest advancements in language model development and deployment."),
]

st.title("Language Model FAQ")

# One collapsible expander per FAQ entry, in declaration order.
for question, answer in FAQ_ENTRIES:
    panel = st.expander(question)
    panel.write(answer)
pages/dashboard.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
import os
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import ConversationalRetrievalChain
import streamlit as st
import time
from dotenv import load_dotenv, find_dotenv
from langchain_together import Together

# Load the Together API key (T_API) and any other secrets from a local .env.
load_dotenv(find_dotenv())

st.set_page_config(page_title="ChatGPT BetaV1")

# Center the title/caption in the middle column of a 1:2:1 layout.
col1, col2, col3 = st.columns([1, 2, 1])
with col2:
    st.title("ChatGPT BetaV1 🦾")
    st.caption('⚠️ **_Note: Please Wait 5 second after Prompt_**')

st.sidebar.title("Welcome to ChatGPT")
#st.sidebar.image("WE_GO_JIM.png", width=275)
st.sidebar.title("Shoot your gym-related questions")

# Hide Streamlit chrome and restyle buttons/sidebar (same CSS as the other pages).
st.markdown(
    """
<style>
div.stButton > button:first-child {
    background-color: #ffd0d0;
}

div.stButton > button:active {
    background-color: #ff6262;
}

.st-emotion-cache-6qob1r {
    position: relative;
    height: 100%;
    width: 100%;
    background-color: black;
    overflow: overlay;
}

div[data-testid="stStatusWidget"] div button {
    display: none;
}

.reportview-container {
    margin-top: -2em;
}
#MainMenu {visibility: hidden;}
.stDeployButton {display:none;}
footer {visibility: hidden;}
#stDecoration {display:none;}
button[title="View fullscreen"]{
    visibility: hidden;}
</style>
""",
    unsafe_allow_html=True,
)


def reset_conversation():
    """Clear both the rendered transcript and the LLM's rolling chat memory."""
    st.session_state.messages = []
    st.session_state.memory.clear()


# Per-session state: transcript for re-rendering, windowed memory for the chain.
if "messages" not in st.session_state:
    st.session_state.messages = []

if "memory" not in st.session_state:
    st.session_state.memory = ConversationBufferWindowMemory(k=2, memory_key="chat_history", return_messages=True)

# Embeddings + FAISS index built offline in "data_byte".
# NOTE(review): allow_dangerous_deserialization / trust_remote_code both run
# non-sandboxed code — only safe because the index and model are self-chosen.
embeddings = HuggingFaceEmbeddings(model_name="nomic-ai/nomic-embed-text-v1-ablated", model_kwargs={"trust_remote_code": True})
db = FAISS.load_local("data_byte", embeddings, allow_dangerous_deserialization=True)
db_retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 4})

# Mixtral-instruct style prompt; {context} is filled from the retriever,
# {chat_history} from the window memory.
prompt_template = """<s>[INST]This is a chat template ,your name is chatgpt BetaV1,this model is made by Tarun,trained by Tarun,made by Tarun ,llm model is made by Tarun,you have 1 bilion parameters, and you are the private gpt model , your primary objective is to provide accurate and concise information related to code, question solving, based on the user's questions. Do not generate your own questions and answers. You will adhere strictly to the instructions provided, offering relevant context from the knowledge base while avoiding unnecessary details. Your responses will be brief, to the point, and in compliance with the established format. If a question falls outside the given context, rely on your own knowledge base to generate an appropriate response. You will prioritize the user's query and refrain from posing additional questions and do not repeat the prompt template and the things that you have said already.
QUESTION: {question}
CONTEXT: {context}
CHAT HISTORY: {chat_history}[/INST]
ASSISTANT:
</s>
"""

prompt = PromptTemplate(template=prompt_template,
                        input_variables=['question', 'context', 'chat_history'])

# Together-hosted Mixtral; raises KeyError at import time if T_API is unset.
llm = Together(
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
    temperature=0.7,
    max_tokens=1024,
    top_k=1,
    together_api_key=os.environ['T_API']
)

qa = ConversationalRetrievalChain.from_llm(
    llm=llm,
    memory=st.session_state.memory,
    retriever=db_retriever,
    combine_docs_chain_kwargs={'prompt': prompt}
)

# Replay the transcript on every rerun (Streamlit re-executes the script).
for message in st.session_state.messages:
    with st.chat_message(message.get("role")):
        st.write(message.get("content"))

input_prompt = st.chat_input("Say something")

if input_prompt:
    with st.chat_message("user"):
        st.write(input_prompt)

    st.session_state.messages.append({"role": "user", "content": input_prompt})

    with st.chat_message("assistant"):
        with st.status("Lifting data, one bit at a time 💡🦾...", expanded=True):
            result = qa.invoke(input=input_prompt)
            message_placeholder = st.empty()
            full_response = "⚠️ **_Note: Information provided may be inaccurate._** \n\n\n"
        # FIX: render inside the loop so the per-character sleep produces a
        # visible typewriter effect; previously the single render after the
        # loop made every 0.02 s sleep invisible dead time.
        for chunk in result["answer"]:
            full_response += chunk
            time.sleep(0.02)
            message_placeholder.markdown(full_response + " ▌")
        # FIX: final render without the "▌" cursor artifact.
        message_placeholder.markdown(full_response)
        st.button('Reset All Chat 🗑️', on_click=reset_conversation)

    st.session_state.messages.append({"role": "assistant", "content": result["answer"]})
pages/page1.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

st.title("pages1")

# Heap Analytics bootstrap snippet.
# NOTE(review): st.markdown(..., unsafe_allow_html=True) renders HTML, but
# Streamlit does not execute injected <script> tags, so this tracker very
# likely never fires — streamlit.components.v1.html(...) would be needed.
# Left in place to avoid changing behavior; confirm in the browser console.
heap_script = """
<script type="text/javascript">
window.heap=window.heap||[],heap.load=function(e,t){window.heap.appid=e,window.heap.config=t=t||{};var r=document.createElement("script");r.type="text/javascript",r.async=!0,r.src="https://cdn.heapanalytics.com/js/heap-"+e+".js";var a=document.getElementsByTagName("script")[0];a.parentNode.insertBefore(r,a);for(var n=function(e){return function(){heap.push([e].concat(Array.prototype.slice.call(arguments,0)))}},p=["addEventProperties","addUserProperties","clearEventProperties","identify","resetIdentity","removeEventProperty","setEventProperties","track","unsetEventProperty"],o=0;o<p.length;o++)heap[p[o]]=n(p[o])};
heap.load("3171239321");
</script>
"""

st.markdown(heap_script, unsafe_allow_html=True)


# Firebase Analytics bootstrap snippet.
# FIX: removed a duplicate `import streamlit as st` that sat here mid-file.
# NOTE(review): same caveat as above — the module <script> will not execute
# through st.markdown. The Firebase web config (apiKey etc.) is client-side
# by design, but verify Firebase security rules restrict what it can access.
firebase_script = """
<!-- Firebase Initialization Script -->
<script type="module">
// Import the functions you need from the SDKs you need
import { initializeApp } from "https://www.gstatic.com/firebasejs/10.11.1/firebase-app.js";
import { getAnalytics } from "https://www.gstatic.com/firebasejs/10.11.1/firebase-analytics.js";
// TODO: Add SDKs for Firebase products that you want to use
// https://firebase.google.com/docs/web/setup#available-libraries

// Your web app's Firebase configuration
// For Firebase JS SDK v7.20.0 and later, measurementId is optional
const firebaseConfig = {
    apiKey: "AIzaSyD187ReDKog24MiVa_t0Gmd8wRwBoiyPtw",
    authDomain: "gdfg-cf9d9.firebaseapp.com",
    projectId: "gdfg-cf9d9",
    storageBucket: "gdfg-cf9d9.appspot.com",
    messagingSenderId: "79781427509",
    appId: "1:79781427509:web:101939210ade029688347c",
    measurementId: "G-XD47ETNL0V"
};

// Initialize Firebase
const app = initializeApp(firebaseConfig);
const analytics = getAnalytics(app);
</script>
<!-- End Firebase Initialization Script -->
"""

st.markdown(firebase_script, unsafe_allow_html=True)