Spaces:
Sleeping
Sleeping
File size: 5,546 Bytes
b9a2f1d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 |
## Gemini API integration for the OxSecure Intelligence app
import os
import pathlib
import textwrap
from PIL import Image
from constants import gemini_key
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain import PromptTemplate
from langchain.chains import LLMChain
import google.generativeai as genai
from langchain.memory import ConversationBufferMemory
from google.generativeai import GenerativeModel
from google.generativeai.types import HarmCategory, HarmBlockThreshold, HarmProbability
from langchain.chains import SequentialChain
import streamlit as st
# streamlit framework
# Streamlit page configuration — must run before any other st.* call.
_page_config = {
    "page_title": "OxSecure A.I",
    "page_icon": "π",
    "layout": "wide",
}
st.set_page_config(**_page_config)
# Load custom CSS
def load_css(file_name):
    """Inject a local CSS file's contents into the Streamlit page as a <style> tag."""
    with open(file_name) as css_file:
        styles = css_file.read()
    # unsafe_allow_html is required for raw <style> markup to take effect.
    st.markdown(f'<style>{styles}</style>', unsafe_allow_html=True)
# Apply the app-wide stylesheet before rendering any widgets.
load_css("ui/Style.css")
# API configuration: expose the key via the env var the SDK expects,
# then configure the google.generativeai client with it.
os.environ["GOOGLE_API_KEY"]=gemini_key
genai.configure(api_key = os.environ['GOOGLE_API_KEY'])
## Function to load the Gemini vision model and get a response
def get_gemini_response(input, image):
    """Send a text prompt and/or an image to Gemini and return the response text.

    Args:
        input: User prompt string; when empty, only the image is sent.
        image: A PIL.Image opened from the uploaded file.

    Returns:
        The generated response text from the model.
    """
    model = genai.GenerativeModel('gemini-1.5-pro-latest')
    # Disable all content filters for the supported harm categories.
    # BUG FIX: removed the bogus `HarmProbability: ...` entry — safety_settings
    # keys must be HarmCategory members; passing the HarmProbability class as a
    # key is invalid and rejected by the API. The dict is also defined once
    # instead of being duplicated in both branches.
    safety_settings = {
        HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
    }
    # With a non-empty prompt, send prompt + image together; otherwise image only.
    content = [input, image] if input else image
    response = model.generate_content(content, safety_settings=safety_settings)
    return response.text
st.title('OxSecure Intelligence π§ ')
st.caption('Cybersecurity Best practices for Infrastructure')
st.subheader('By :- Aadi π§βπ»')
st.text('π Empower Tomorrow, π‘οΈ Secure Today: Unleash the Power of Cybersecurity Brilliance! π»β¨ ')
input_text=st.text_input("Search Your Desire Security Related Topic π")
# FIX: renamed the local from `input` (shadowed the builtin) to `image_prompt`;
# the widget key "input" is unchanged, so session state is unaffected.
image_prompt=st.text_input("Input Prompt: ",key="input")
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
image=""
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image.", use_column_width=True)
submit=st.button("Tell me about the image")
if submit:
    # ROBUSTNESS FIX: previously an empty string was sent as the "image" when
    # nothing was uploaded, which makes generate_content fail. Guard instead.
    if uploaded_file is None:
        st.error("Please upload an image before asking about it.")
    else:
        response=get_gemini_response(image_prompt,image)
        st.subheader("The Response is")
        st.write(response)
# Prompt Templates
# Prompt template for the first chain: expand the user's topic into a
# descriptive overview.
first_input_prompt=PromptTemplate(
    input_variables=['Topic'],
    template="Tell me everything about and explain in so informative descriptive way about {Topic} "
)
# Conversation memories — one per chain stage, keyed on that stage's input.
Topic_memory = ConversationBufferMemory(input_key='Topic', memory_key='chat_history')
Policy_memory = ConversationBufferMemory(input_key='security policies', memory_key='chat_history')
Practice_memory = ConversationBufferMemory(input_key='Practice', memory_key='description_history')
# Gemini chat model shared by all three chains.
# BUG FIX: removed the bogus `HarmProbability: ...` entry — safety_settings
# keys must be HarmCategory members; a bare class object as a key is invalid.
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro-latest",
    safety_settings={
        HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
    },
)
# Chain 1: Topic -> security policies overview.
chain = LLMChain(llm=llm, prompt=first_input_prompt, verbose=True, output_key='security policies', memory=Topic_memory)
# Chain 2: security policies (+ Topic) -> secure coding practices.
# BUG FIX: the template references {Topic}, so 'Topic' must be declared in
# input_variables — PromptTemplate validates variables against the template
# and raises at construction time otherwise. SequentialChain supplies 'Topic'
# as its own input variable, so every stage can receive it.
second_input_prompt = PromptTemplate(
    input_variables=['security policies', 'Topic'],
    template="write best {security policies} and perfect code snippet for implementing secure coding to this {Topic} and give me all important full secure coding principles about {Topic} use codes snippet for every countersome points . "
)
chain2 = LLMChain(
    llm=llm, prompt=second_input_prompt, verbose=True, output_key='Practice', memory=Policy_memory)
# Chain 3: Practice (+ Topic) -> infrastructure best-practices description.
# BUG FIX: same as above — {Topic} appears in the template, declare it.
third_input_prompt = PromptTemplate(
    input_variables=['Practice', 'Topic'],
    template="Implement 5 major best Cybersecurity {Practice} for this {Topic} that helps better security postures into infrastructure business. give Major cyberattack which is done by this {Topic} and write about malware which is developed by this {Topic}"
)
chain3 = LLMChain(llm=llm, prompt=third_input_prompt, verbose=True, output_key='description', memory=Practice_memory)
# Run the three stages in order, exposing each stage's output.
parent_chain = SequentialChain(
    chains=[chain, chain2, chain3],
    input_variables=['Topic'],
    output_variables=['security policies', 'Practice', 'description'],
    verbose=True,
)
# Kick off the sequential chain once the user supplies a topic, then surface
# the accumulated memories in collapsible sections.
if input_text:
    chain_result = parent_chain({'Topic': input_text})
    st.text(chain_result)
    with st.expander('Your Topic'):
        st.info(Topic_memory.buffer)
    with st.expander('Major Practices'):
        st.info(Practice_memory.buffer)

# Page footer.
st.markdown("---")
st.markdown(" Created with β€οΈ by Aditya Pandey ")
|