madji05 committed
Commit 7bc1798 · verified · 1 Parent(s): ed85685

Upload 5 files
Files changed (5)
  1. README.md +8 -7
  2. app.py +85 -0
  3. gitattributes +35 -0
  4. requirements.txt +1 -0
  5. robot.png +0 -0
README.md CHANGED
@@ -1,10 +1,11 @@
  ---
- title: Py
- emoji: 🚀
- colorFrom: red
- colorTo: blue
- sdk: docker
+ title: Code Assistant
+ emoji: 🦀
+ colorFrom: green
+ colorTo: pink
+ sdk: streamlit
+ sdk_version: 1.33.0
+ app_file: app.py
  pinned: false
+ license: mit
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
app.py ADDED
@@ -0,0 +1,85 @@
+ import streamlit as st
+ import os
+ import google.generativeai as genai
+ import time
+
+ # Configure the Gemini client from an environment variable (e.g. a GOOGLE_API_KEY
+ # Space secret) instead of hard-coding the key in the source.
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+ icons = {"assistant": "robot.png", "user": "man-kddi.png"}
+
+ model = genai.GenerativeModel('gemini-1.5-flash-latest')
+ prompt = """You are a programming teaching assistant named GenXAI (Generative eXpert AI), created by Pachaiappan [LinkedIn](https://www.linkedin.com/in/pachaiappan), an AI specialist. Answer only the programming, error-fixing, and code-related questions that are asked.
+ Important note: if a question is not related to coding or programming, you must say 'Please ask only coding-related questions.', except for greetings and questions such as "who are you" or "who created you".
+ previous_chat:
+ {chat_history}
+ Human: {human_input}
+ Chatbot:"""
+
+ def get_response(query):
+     # Flatten the stored (question, answer) pairs into the prompt's chat history.
+     # Built locally on every call so earlier turns are not duplicated.
+     chat_history = ""
+     for turn in st.session_state['history']:
+         if turn is not None:
+             chat_history += f"Human: {turn[0]}\n Chatbot: {turn[1]}\n"
+
+     response = model.generate_content(prompt.format(human_input=query, chat_history=chat_history))
+     st.session_state['history'].append((query, response.text))
+     return response.text
+
+
+ def response_streaming(text):
+     # Yield the reply character by character so st.write_stream renders it as it "types".
+     for char in text:
+         yield char
+         time.sleep(0.001)
+
+ st.title("GenXAi")
+ st.caption("I am a Generative eXpert Assistant for programming-related tasks!")
+
+ st.markdown("""
+ <style>
+ .justified-text {
+     text-align: justify;
+ }
+ </style>
+ """, unsafe_allow_html=True)
+
+ with st.sidebar:
+     st.header("ABOUT:")
+
+     st.caption("""
+     <div class="justified-text">
+     This is GenXai (Generative eXpert AI), designed to assist with programming-related questions. It can answer your coding queries, fix errors, and much more. You can also chat with GenXai to build and refine your questions, which makes the conversation more productive.
+     </div>
+     """, unsafe_allow_html=True)
+
+     for _ in range(17):
+         st.write("")
+     st.subheader("Built By:")
+     st.write("[Pachaiappan❤️](https://mr-vicky-01.github.io/Portfolio)")
+     st.write("contact: [Email](mailto:[email protected])")
+
+ if 'messages' not in st.session_state:
+     st.session_state.messages = [{'role': 'assistant', 'content': "I'm here to help with your programming-related questions 😉"}]
+
+ if 'history' not in st.session_state:
+     st.session_state.history = []
+
+ # Replay the conversation so far.
+ for message in st.session_state.messages:
+     with st.chat_message(message['role'], avatar=icons[message['role']]):
+         st.write(message['content'])
+
+ user_input = st.chat_input("Ask Your Questions 👉..")
+ if user_input:
+     st.session_state.messages.append({'role': 'user', 'content': user_input})
+     with st.chat_message("user", avatar="man-kddi.png"):
+         st.write(user_input)
+
+     with st.spinner("Thinking..."):
+         response = get_response(user_input)
+
+     # The model's reply is rendered under the assistant role (the original used "user" here).
+     with st.chat_message("assistant", avatar="robot.png"):
+         st.write_stream(response_streaming(response))
+
+     message = {"role": "assistant", "content": response}
+     st.session_state.messages.append(message)
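
The Gemini call above can be exercised outside Streamlit before deploying. The sketch below is illustrative and not part of this upload: the file name check_gemini.py, the trimmed prompt, and the GOOGLE_API_KEY variable name are assumptions standing in for whatever secret the Space actually uses.

```python
# check_gemini.py - a standalone sanity-check sketch, not part of this commit.
# Assumes the API key is exposed as the GOOGLE_API_KEY environment variable
# (e.g. a Hugging Face Space secret); the trimmed prompt is illustrative only.
import os

import google.generativeai as genai

genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
model = genai.GenerativeModel("gemini-1.5-flash-latest")

# Same template shape as app.py: earlier turns are flattened into plain text.
template = """You are a programming teaching assistant. Answer only coding questions.
previous_chat:
{chat_history}
Human: {human_input}
Chatbot:"""

history = "Human: What does *args do in Python?\n Chatbot: It collects extra positional arguments into a tuple.\n"
response = model.generate_content(
    template.format(chat_history=history, human_input="And **kwargs?")
)
print(response.text)
```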
gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1 @@
+ google-generativeai
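
This single requirement pulls in only the Gemini client; streamlit itself is provided by the Space runtime from the sdk and sdk_version fields in README.md. A quick, hypothetical check that both packages resolve in the runtime could look like this (package names assumed to be the canonical PyPI ones):

```python
# Hypothetical dependency check - assumes the canonical PyPI names
# "google-generativeai" (from requirements.txt) and "streamlit" (from the Space SDK).
from importlib.metadata import version

for pkg in ("google-generativeai", "streamlit"):
    print(f"{pkg}=={version(pkg)}")
```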
robot.png ADDED