# Hugging Face Space artifact header (page-scrape residue, preserved as a comment):
# author: awacke1 — "Update app.py" — commit 12051cb (verified) — raw / history / blame — 4.37 kB
import streamlit as st
import pandas as pd
from io import StringIO
import streamlit as st
import base64
import requests
def get_image_as_base64(url):
    """Fetch *url* over HTTP and return its body base64-encoded as a str.

    Returns None when the image cannot be retrieved — either a non-200
    status code or a network-level failure — so callers can degrade
    gracefully instead of crashing the Streamlit script.
    """
    try:
        # Bound the request: without a timeout a stalled server would
        # hang the whole Streamlit run indefinitely.
        response = requests.get(url, timeout=10)
    except requests.RequestException:
        # DNS failure, refused connection, timeout, etc. — treat the
        # same as a bad status so the caller's None-check handles it.
        return None
    if response.status_code == 200:
        # Encode the raw bytes, then decode to str so the result can be
        # embedded directly in a data: URI inside markdown/HTML.
        return base64.b64encode(response.content).decode("utf-8")
    return None
def create_download_link(filename, base64_str):
    """Build an HTML anchor that downloads *base64_str* (base64 PNG data) as *filename*.

    The returned markup is meant for st.markdown(..., unsafe_allow_html=True).
    """
    # Bug fix: the download attribute previously contained a hard-coded
    # "(unknown)" placeholder, silently ignoring the filename argument.
    href = f'<a href="data:file/png;base64,{base64_str}" download="{filename}">Download Image</a>'
    return href
# Sidebar image: fetch once, embed inline as base64, and offer a download.
image_url = "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/AswjRkg3QMR503fAScTrV.png"
image_base64 = get_image_as_base64(image_url)
if image_base64 is None:
    # Fetch failed — surface the problem rather than rendering nothing.
    st.sidebar.write("Failed to load the image.")
else:
    with st.sidebar:
        # Inline data: URI, so rendering needs no further network round-trip.
        st.markdown(f"![image](data:image/png;base64,{image_base64})")
        # Same bytes, exposed as a clickable file download.
        st.markdown(
            create_download_link("downloaded_image.png", image_base64),
            unsafe_allow_html=True,
        )
with st.sidebar:
    # Also render the remotely hosted copy of the image via its URL.
    hosted_image_md = """
![image](https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/AswjRkg3QMR503fAScTrV.png)
"""
    st.markdown(hosted_image_md)
# Inline CSV: checklist sections paired with suggested defensive
# (White_Team) and offensive (Red_Team) Python libraries.
data = """
Section,Title,White_Team_Libraries,Red_Team_Libraries
1.1,"Responsible and Trustworthy Artificial Intelligence","ai-fairness-360, tensorflow, pytorch","adversarial-robustness-toolbox, tensorflow, pytorch"
1.2,"Who is This For?","numpy, pandas","numpy, scipy"
1.3,"Why a Checklist?","pandas, matplotlib","scikit-learn, seaborn"
1.4,"Not Comprehensive","streamlit, dash","flask, django"
1.5,"Large Language Model Challenges","transformers, datasets","keras, tensorflow"
1.6,"LLM Threat Categories","threatmodeler, pytm","mitmproxy, burpsuite"
1.7,"Artificial Intelligence Security and Privacy Training","owasp, safecode","hackthebox, tryhackme"
1.8,"Incorporate LLM Security and governance with Existing, Established Practices and Controls","compliance-checker, policy-sentry","sqlmap, nmap"
1.9,"Fundamental Security Principles","secure-code-toolkit, bandit","metasploit, pwntools"
1.10,"Risk","risk-frameworks, gordon","exploit-db, cve-search"
1.11,"Vulnerability and Mitigation Taxonomy","vuln-code-db, flawfinder","yara, volatility"
2,"Determining LLM Strategy","strategic-planning, mindmaps","red-team-toolkit, blue-team-toolkit"
2.1,"Deployment Strategy","docker, kubernetes","ansible, puppet"
3.1,"Adversarial Risk","cleverhans, foolbox","gym, reinforcement-learning"
3.2,"Threat Modeling","pytm, sea-sponge","attackflow, evilginx"
3.3,"AI Asset Inventory","asset-inventory, snipe-it","openvas, qualys"
3.4,"AI Security and Privacy Training","security-training, cybrary","offensive-security, sans"
3.5,"Establish Business Cases","business-model-canvas, leanstack","swot-analysis, pestle-analysis"
3.6,"Governance","corporate-governance, iso-standards","cobit, itil"
3.7,"Legal","legal-docs, law-libraries","case-law-search, legal-hack"
3.8,"Regulatory","regulatory-compliance, gdpr","fcc-tools, eu-data-protection"
3.9,"Using or Implementing Large Language Model Solutions","model-deployment, ml-ops","shadow-it, devops-tools"
3.10,"Testing, Evaluation, Verification, and Validation (TEVV)","test-automation, quality-assurance","bug-bounty, fuzzing"
3.11,"Model Cards and Risk Cards","model-card-toolkit, aboutml","threat-modeling, risk-analysis"
3.12,"RAG: Large Language Model Optimization","rag-tokenizer, huggingface-transformers","genetic-algorithms, particle-swarm"
3.13,"AI Red Teaming","red-team-framework, adversary-simulation","kali-linux, pen-test-tools"
"""

# Parse the inline CSV through an in-memory text buffer into a DataFrame.
csv_buffer = StringIO(data)
df = pd.read_csv(csv_buffer)
# Main page layout: title, the checklist table, then usage guidance.
st.title('AI System Defense Strategy')

# Render the parsed DataFrame as an interactive table.
st.write(df)

closing_note = """
This table includes sections relevant to defending AI systems, along with recommended Python libraries for both white team (defense) and red team (offensive security) developer leads. Use the information to guide the development and testing of responsible and secure AI systems.
"""
st.markdown(closing_note)