"""Streamlit app: sidebar image with download link plus an IOC/defense guide
and a table of white-team / red-team Python libraries per checklist section."""

import base64
from io import StringIO

import pandas as pd
import requests
import streamlit as st


def get_image_as_base64(url):
    """Fetch *url* and return the response body base64-encoded (str).

    Returns None when the request does not come back with HTTP 200.
    """
    # Timeout so a slow/unreachable CDN cannot hang the whole app render.
    response = requests.get(url, timeout=10)
    if response.status_code == 200:
        # Convert the image to base64
        return base64.b64encode(response.content).decode("utf-8")
    return None


def create_download_link(filename, base64_str):
    """Return an HTML anchor that downloads the base64-encoded PNG as *filename*.

    The image bytes are embedded in a data: URI; the ``download`` attribute
    tells the browser which filename to save it under.
    """
    href = (
        f'<a href="data:image/png;base64,{base64_str}" '
        f'download="{filename}">Download Image</a>'
    )
    return href


# URL of the image you want to display and download
image_url = "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/AswjRkg3QMR503fAScTrV.png"
image_base64 = get_image_as_base64(image_url)

if image_base64 is not None:
    with st.sidebar:
        # Display the image using the base64 string
        st.markdown(f"![image](data:image/png;base64,{image_base64})")
        # Provide a download link for the image
        download_link = create_download_link("downloaded_image.png", image_base64)
        # unsafe_allow_html is required so the <a> tag is rendered, not escaped.
        st.markdown(download_link, unsafe_allow_html=True)
else:
    st.sidebar.write("Failed to load the image.")

with st.sidebar:
    st.markdown("""
![image](https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/AswjRkg3QMR503fAScTrV.png)
""")

st.markdown("""
# Understanding indicators of compromise (IOCs) is crucial for network security. 🔍
## Overview of the pattern and recommended actions:
## Indicators of Compromise (IOCs):
1. File Hash Signatures: Attacks leave specific file hash signatures. These can be used to identify compromised files. 📁
2. Command and Control (C2) IP Addresses: Monitor network traffic for connections to known C2 servers. Blocking these IPs can prevent further communication. 🚫🌐
3. Domains: Keep an eye on domains associated. These may appear in phishing emails or malicious URLs. 🕵️‍♂️🔗
4. Malware Analysis Reports: Stay informed through reliable sources like the FBI and other security organizations. 📊🔍
## Network Exposure Assessment:
1. Review Logs: Analyze network logs for suspicious activity. Look for signs of unauthorized access, unusual authentication attempts, or unexpected network connections. 📝🔍
2. Quarantine and Re-Image: At the first sign of compromise, quarantine affected hosts and re-image them. This helps remove any lingering malware. 🛡️💾
3. Collect Artifacts: Gather artifacts such as running processes, services, and recent network connections. These can provide insights into the attack. 🕵️‍♀️🔎
4. Provision New Credentials: Change account credentials to prevent further unauthorized access. 🔑🔄
## Cloud-Based Exposures:
1. Cloud Security Policies: Train engineers on cloud security best practices. Implement robust access controls, encryption, and regular audits. ☁️🔒
2. Backup and Recovery: Regularly back up critical data to prevent data loss due to ransomware. Test data restoration procedures. 💾🔄
3. Zero Trust Architecture: Adopt a zero-trust approach, where every access request is verified, regardless of location or network segment. 🚫👥
4. Security Awareness Training: Educate employees about phishing, social engineering, and safe online practices. 🎓🛡️
## Timely detection and proactive measures are essential to mitigate the impact of attacks.
### Stay vigilant and collaborate with security experts to protect your networks and data. 🤝🛡️
""")

# Sample data for the inline CSV
data = """
Section,Title,White_Team_Libraries,Red_Team_Libraries
1.1,"Responsible and Trustworthy Artificial Intelligence","ai-fairness-360, tensorflow, pytorch","adversarial-robustness-toolbox, tensorflow, pytorch"
1.2,"Who is This For?","numpy, pandas","numpy, scipy"
1.3,"Why a Checklist?","pandas, matplotlib","scikit-learn, seaborn"
1.4,"Not Comprehensive","streamlit, dash","flask, django"
1.5,"Large Language Model Challenges","transformers, datasets","keras, tensorflow"
1.6,"LLM Threat Categories","threatmodeler, pytm","mitmproxy, burpsuite"
1.7,"Artificial Intelligence Security and Privacy Training","owasp, safecode","hackthebox, tryhackme"
1.8,"Incorporate LLM Security and governance with Existing, Established Practices and Controls","compliance-checker, policy-sentry","sqlmap, nmap"
1.9,"Fundamental Security Principles","secure-code-toolkit, bandit","metasploit, pwntools"
1.10,"Risk","risk-frameworks, gordon","exploit-db, cve-search"
1.11,"Vulnerability and Mitigation Taxonomy","vuln-code-db, flawfinder","yara, volatility"
2,"Determining LLM Strategy","strategic-planning, mindmaps","red-team-toolkit, blue-team-toolkit"
2.1,"Deployment Strategy","docker, kubernetes","ansible, puppet"
3.1,"Adversarial Risk","cleverhans, foolbox","gym, reinforcement-learning"
3.2,"Threat Modeling","pytm, sea-sponge","attackflow, evilginx"
3.3,"AI Asset Inventory","asset-inventory, snipe-it","openvas, qualys"
3.4,"AI Security and Privacy Training","security-training, cybrary","offensive-security, sans"
3.5,"Establish Business Cases","business-model-canvas, leanstack","swot-analysis, pestle-analysis"
3.6,"Governance","corporate-governance, iso-standards","cobit, itil"
3.7,"Legal","legal-docs, law-libraries","case-law-search, legal-hack"
3.8,"Regulatory","regulatory-compliance, gdpr","fcc-tools, eu-data-protection"
3.9,"Using or Implementing Large Language Model Solutions","model-deployment, ml-ops","shadow-it, devops-tools"
3.10,"Testing, Evaluation, Verification, and Validation (TEVV)","test-automation, quality-assurance","bug-bounty, fuzzing"
3.11,"Model Cards and Risk Cards","model-card-toolkit, aboutml","threat-modeling, risk-analysis"
3.12,"RAG: Large Language Model Optimization","rag-tokenizer, huggingface-transformers","genetic-algorithms, particle-swarm"
3.13,"AI Red Teaming","red-team-framework, adversary-simulation","kali-linux, pen-test-tools"
"""

# Creating a DataFrame
df = pd.read_csv(StringIO(data))

# Streamlit app
st.title('AI System Defense Strategy')

# Display DataFrame in the app
st.write(df)

# Instructions or additional app features can go here
st.markdown("""
This table includes sections relevant to defending AI systems, along with recommended Python libraries for both white team (defense) and red team (offensive security) developer leads. Use the information to guide the development and testing of responsible and secure AI systems.
""")