Space status: Runtime error
Update app.py
app.py (CHANGED)
```diff
@@ -29,37 +29,54 @@ def generate(prompt):
     except:
         yield "<<Some errors occurred>>"
 
-st.title("Stockmark-LLM-100b")
-
 intro = """This is a demo site for Stockmark-LLM-100b. This service is running on AWS Inferentia2.
 - Pretrained model: [stockmark/stockmark-100b](https://huggingface.co/stockmark/stockmark-100b)
 - Instruction tuned model: [stockmark/stockmark-100b-instruct-v0.1](https://huggingface.co/stockmark/stockmark-100b-instruct-v0.1)
 """
 
-st.markdown(intro)
+disclaimer = """
+- Responses of our LLM may be incorrect, biased, or harmful.
+- We may use users' chat data in this demo to improve our LLM.
+"""
 
-prompt = st.session_state.get("prompt", "")
-response = st.session_state.get("response", "")
+tab1, tab2 = st.tabs(["Demo", "Disclaimer"])
 
-if prompt == "" or response:
-    print("new_session")
-    prompt_new = st.text_area("Prompt:")
-    if prompt_new:
-        st.session_state["prompt"] = prompt_new
-        st.session_state["response"] = ""
-        st.rerun()
-else:
-    prompt = st.text_area("Prompt:", value=prompt, disabled=True)
+with tab1:
+    st.title("Stockmark-LLM-100b")
+    st.markdown(intro)
 
-if prompt:
-    if response:
-        with st.chat_message("assistant"):
-            st.write(response)
-    else:
-        with st.chat_message("assistant"):
-            response = st.write_stream(generate(prompt))
-        st.session_state["response"] = response
-        st.rerun()
+    prompt = st.session_state.get("prompt", "")
+    response = st.session_state.get("response", "")
+
+    if prompt == "" or response:
+        print("new_session")
+        prompt_new = st.text_area("Prompt:")
+        if prompt_new:
+            st.session_state["prompt"] = prompt_new
+            st.session_state["response"] = ""
+            st.rerun()
+    else:
+        prompt = st.text_area("Prompt:", value=prompt, disabled=True)
+
+    if prompt:
+        if response:
+            with st.chat_message("assistant"):
+                st.write(response)
+        else:
+            with st.chat_message("assistant"):
+                response = st.write_stream(generate(prompt))
+            st.session_state["response"] = response
+            st.rerun()
+
+with tab2:
+    st.title("Stockmark-LLM-100b: Disclaimer")
+    st.markdown(disclaimer)
```