Upload 54 files
This view is limited to 50 files because it contains too many changes.
- .gitignore +127 -0
- Dockerfile +52 -0
- __init__.py +0 -0
- __pycache__/__init__.cpython-311.pyc +0 -0
- __pycache__/app.cpython-311.pyc +0 -0
- __pycache__/main.cpython-311.pyc +0 -0
- __pycache__/main_isp.cpython-311.pyc +0 -0
- app.py +190 -0
- config.json +98 -0
- core/__init__.py +2 -0
- core/__pycache__/__init__.cpython-311.pyc +0 -0
- core/__pycache__/dhcp_server.cpython-311.pyc +0 -0
- core/__pycache__/firewall.cpython-311.pyc +0 -0
- core/__pycache__/ip_parser.cpython-311.pyc +0 -0
- core/__pycache__/logger.cpython-311.pyc +0 -0
- core/__pycache__/nat_engine.cpython-311.pyc +0 -0
- core/__pycache__/openvpn_manager.cpython-311.pyc +0 -0
- core/__pycache__/packet_bridge.cpython-311.pyc +0 -0
- core/__pycache__/session_tracker.cpython-311.pyc +0 -0
- core/__pycache__/socket_translator.cpython-311.pyc +0 -0
- core/__pycache__/tcp_engine.cpython-311.pyc +0 -0
- core/__pycache__/traffic_router.cpython-311.pyc +0 -0
- core/__pycache__/virtual_router.cpython-311.pyc +0 -0
- core/dhcp_server.py +391 -0
- core/firewall.py +523 -0
- core/ip_parser.py +546 -0
- core/logger.py +555 -0
- core/nat_engine.py +516 -0
- core/openvpn_manager.py +658 -0
- core/packet_bridge.py +664 -0
- core/session_tracker.py +602 -0
- core/socket_translator.py +653 -0
- core/tcp_engine.py +716 -0
- core/traffic_router.py +455 -0
- core/virtual_router.py +565 -0
- database/app.db +0 -0
- main.py +62 -0
- main_isp.py +273 -0
- models/__pycache__/user.cpython-311.pyc +0 -0
- models/user.py +18 -0
- openvpn/ca.crt +20 -0
- openvpn/dh.pem +8 -0
- openvpn/server.conf +21 -0
- openvpn/server.crt +86 -0
- openvpn/server.key +28 -0
- requirements.txt +33 -0
- routes/__pycache__/isp_api.cpython-311.pyc +0 -0
- routes/__pycache__/user.cpython-311.pyc +0 -0
- routes/isp_api.py +1171 -0
- routes/user.py +39 -0
.gitignore
ADDED
@@ -0,0 +1,127 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# Virtual environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# IDEs
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Logs
*.log
logs/
/var/log/

# Database
*.db
*.sqlite
*.sqlite3

# VPN configurations (for security)
*.ovpn
*.crt
*.key
*.pem
/tmp/vpn_client_configs/

# OpenVPN
/etc/openvpn/
/var/log/openvpn/

# Temporary files
/tmp/
*.tmp
*.temp

# Flask
instance/
.webassets-cache

# Environment variables
.env.local
.env.development.local
.env.test.local
.env.production.local

# Coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# Celery
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json
Dockerfile
ADDED
@@ -0,0 +1,52 @@
# Virtual ISP Stack with OpenVPN Integration
# Dockerfile for containerized deployment

FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y \
    openvpn \
    iptables \
    iproute2 \
    net-tools \
    procps \
    build-essential \
    python3-dev \
    && rm -rf /var/lib/apt/lists/*

COPY openvpn/server.conf /etc/openvpn/server/server.conf
COPY openvpn/ca.crt /etc/openvpn/server/ca.crt
COPY openvpn/server.crt /etc/openvpn/server/server.crt
COPY openvpn/server.key /etc/openvpn/server/server.key
COPY openvpn/dh.pem /etc/openvpn/server/dh.pem

# Copy requirements and install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application files
COPY . .

# Create necessary directories
RUN mkdir -p /tmp/vpn_client_configs \
    && mkdir -p /var/log/openvpn \
    && mkdir -p database

# Set environment variables
ENV FLASK_APP=app.py
ENV FLASK_ENV=production
ENV PORT=7860

# Expose port
EXPOSE 7860

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:7860/health || exit 1

# Run the application
CMD ["python", "app.py"]
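One detail worth flagging in this Dockerfile: the HEALTHCHECK shells out to curl, but curl is neither part of the python:3.11-slim base image nor in the apt-get install list above, so the health check would most likely always fail. A minimal fix (a suggestion, not part of this upload) is to add curl to the system dependencies:

RUN apt-get update && apt-get install -y \
    curl \
    openvpn \
    iptables \
    iproute2 \
    net-tools \
    procps \
    build-essential \
    python3-dev \
    && rm -rf /var/lib/apt/lists/*

Alternatively, the health check could reuse the Python interpreter already in the image (python -c with urllib) and avoid the extra package.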
__init__.py
ADDED
File without changes
__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (173 Bytes).
__pycache__/app.cpython-311.pyc
ADDED
Binary file (7.82 kB).
__pycache__/main.cpython-311.pyc
ADDED
Binary file (3.57 kB).
__pycache__/main_isp.cpython-311.pyc
ADDED
Binary file (10.8 kB).
app.py
ADDED
@@ -0,0 +1,190 @@
#!/usr/bin/env python3
"""
Virtual ISP Stack with OpenVPN Integration
HuggingFace Spaces Entry Point

This application provides a complete Virtual ISP stack with OpenVPN server integration,
allowing users to manage VPN connections, generate client configurations, and monitor
network traffic through a RESTful API.
"""

import os
import sys
import logging

# Add current directory to Python path
sys.path.insert(0, os.path.dirname(__file__))

from flask import Flask, send_from_directory, jsonify
from flask_cors import CORS
from models.user import db
from routes.user import user_bp
from routes.isp_api import init_engines, isp_api

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Create Flask application
app = Flask(__name__, static_folder=os.path.join(os.path.dirname(__file__), 'static'))

# Enable CORS for all routes
CORS(app, origins="*")

# Configuration
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'vpn-isp-stack-secret-key-change-in-production')

# Database configuration
database_path = os.path.join(os.path.dirname(__file__), 'database', 'app.db')
app.config['SQLALCHEMY_DATABASE_URI'] = f"sqlite:///{database_path}"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# Initialize database
db.init_app(app)

# Register blueprints
app.register_blueprint(user_bp, url_prefix='/api')
app.register_blueprint(isp_api, url_prefix='/api')

# Engine configuration
app.config.update({
    "dhcp": {
        "network": "10.0.0.0/24",
        "range_start": "10.0.0.10",
        "range_end": "10.0.0.100",
        "lease_time": 3600,
        "gateway": "10.0.0.1",
        "dns_servers": ["8.8.8.8", "8.8.4.4"]
    },
    "nat": {
        "port_range_start": 10000,
        "port_range_end": 65535,
        "session_timeout": 300
    },
    "firewall": {
        "default_policy": "ACCEPT",
        "log_blocked": True
    },
    "tcp": {
        "initial_window": 65535,
        "max_retries": 3,
        "timeout": 30
    },
    "openvpn": {
        "server_ip": "10.8.0.1",
        "server_port": 1194,
        "network": "10.8.0.0/24"
    },
    "logger": {
        "log_level": "INFO",
        "log_file": "/tmp/virtual_isp.log"
    }
})

# Initialize database tables
with app.app_context():
    try:
        db.create_all()
        logger.info("Database tables created successfully")
    except Exception as e:
        logger.error(f"Error creating database tables: {e}")

# Initialize engines
try:
    init_engines(app.config)
    logger.info("All engines initialized successfully")
except Exception as e:
    logger.error(f"Error initializing engines: {e}")

@app.route('/')
def index():
    """Main index page"""
    return serve_static('')

@app.route('/health')
def health_check():
    """Health check endpoint for monitoring"""
    return jsonify({
        'status': 'healthy',
        'service': 'Virtual ISP Stack with OpenVPN',
        'version': '1.0.0'
    })

@app.route('/api')
def api_info():
    """API information endpoint"""
    return jsonify({
        'service': 'Virtual ISP Stack API',
        'version': '1.0.0',
        'endpoints': {
            'openvpn': {
                'status': '/api/openvpn/status',
                'start': '/api/openvpn/start',
                'stop': '/api/openvpn/stop',
                'clients': '/api/openvpn/clients',
                'config': '/api/openvpn/config/<client_name>',
                'stats': '/api/openvpn/stats',
                'configs': '/api/openvpn/configs'
            },
            'dhcp': {
                'leases': '/api/dhcp/leases'
            },
            'nat': {
                'sessions': '/api/nat/sessions',
                'stats': '/api/nat/stats'
            },
            'firewall': {
                'rules': '/api/firewall/rules',
                'logs': '/api/firewall/logs',
                'stats': '/api/firewall/stats'
            }
        }
    })

@app.route('/<path:path>')
def serve_static(path):
    """Serve static files"""
    static_folder_path = app.static_folder
    if static_folder_path is None:
        return jsonify({'error': 'Static folder not configured'}), 404

    if path != "" and os.path.exists(os.path.join(static_folder_path, path)):
        return send_from_directory(static_folder_path, path)
    else:
        index_path = os.path.join(static_folder_path, 'index.html')
        if os.path.exists(index_path):
            return send_from_directory(static_folder_path, 'index.html')
        else:
            return jsonify({
                'message': 'Virtual ISP Stack with OpenVPN Integration',
                'status': 'running',
                'api_docs': '/api'
            })

@app.errorhandler(404)
def not_found(error):
    """Handle 404 errors"""
    return jsonify({'error': 'Endpoint not found', 'api_docs': '/api'}), 404

@app.errorhandler(500)
def internal_error(error):
    """Handle 500 errors"""
    return jsonify({'error': 'Internal server error'}), 500

if __name__ == '__main__':
    # Get port from environment variable (HuggingFace Spaces uses PORT)
    port = int(os.environ.get('PORT', 7860))

    logger.info(f"Starting Virtual ISP Stack with OpenVPN on port {port}")

    # Run the application
    app.run(
        host='0.0.0.0',
        port=port,
        debug=False,
        threaded=True
    )
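With the container running (or python app.py locally), the health and API-index routes defined above can be exercised with nothing beyond the standard library. A minimal sketch, assuming the app is reachable on localhost:7860, the default PORT above:

import json
from urllib.request import urlopen

# Health check endpoint defined in app.py
with urlopen("http://localhost:7860/health") as resp:
    print(json.load(resp))  # {'status': 'healthy', 'service': 'Virtual ISP Stack with OpenVPN', 'version': '1.0.0'}

# API index: lists the OpenVPN, DHCP, NAT and firewall endpoints
with urlopen("http://localhost:7860/api") as resp:
    endpoints = json.load(resp)["endpoints"]
    print(endpoints["openvpn"]["status"])  # /api/openvpn/status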
config.json
ADDED
@@ -0,0 +1,98 @@
{
    "dhcp": {
        "network": "10.0.0.0/24",
        "range_start": "10.0.0.10",
        "range_end": "10.0.0.100",
        "lease_time": 3600,
        "gateway": "10.0.0.1",
        "dns_servers": [
            "8.8.8.8",
            "8.8.4.4"
        ]
    },
    "nat": {
        "port_range_start": 10000,
        "port_range_end": 65535,
        "session_timeout": 300,
        "host_ip": "0.0.0.0"
    },
    "firewall": {
        "default_policy": "ACCEPT",
        "log_blocked": true,
        "log_accepted": false,
        "max_log_entries": 10000,
        "rules": [
            {
                "rule_id": "allow_dhcp",
                "priority": 1,
                "action": "ACCEPT",
                "direction": "BOTH",
                "dest_port": "67,68",
                "protocol": "UDP",
                "description": "Allow DHCP traffic",
                "enabled": true
            },
            {
                "rule_id": "allow_dns",
                "priority": 2,
                "action": "ACCEPT",
                "direction": "BOTH",
                "dest_port": "53",
                "protocol": "UDP",
                "description": "Allow DNS traffic",
                "enabled": true
            }
        ]
    },
    "tcp": {
        "initial_window": 65535,
        "max_retries": 3,
        "timeout": 300,
        "time_wait_timeout": 120,
        "mss": 1460
    },
    "router": {
        "router_id": "virtual-isp-router",
        "default_gateway": "10.0.0.1",
        "interfaces": [
            {
                "name": "virtual0",
                "ip_address": "10.0.0.1",
                "netmask": "255.255.255.0",
                "enabled": true,
                "mtu": 1500
            }
        ],
        "static_routes": []
    },
    "socket_translator": {
        "connect_timeout": 10,
        "read_timeout": 30,
        "max_connections": 1000,
        "buffer_size": 8192
    },
    "packet_bridge": {
        "websocket_host": "0.0.0.0",
        "websocket_port": 8765,
        "tcp_host": "0.0.0.0",
        "tcp_port": 8766,
        "max_clients": 100,
        "client_timeout": 300
    },
    "session_tracker": {
        "max_sessions": 10000,
        "session_timeout": 3600,
        "cleanup_interval": 300,
        "metrics_retention": 86400
    },
    "logger": {
        "log_level": "INFO",
        "log_to_file": true,
        "log_file_path": "/tmp/virtual_isp.log",
        "log_file_max_size": 10485760,
        "log_file_backup_count": 5,
        "log_to_console": true,
        "structured_logging": true,
        "max_memory_logs": 10000
    }
}
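Note that app.py inlines a smaller copy of the dhcp, nat, firewall, tcp and logger sections via app.config.update(), while config.json also carries router, socket_translator, packet_bridge and session_tracker sections, presumably consumed by main.py / main_isp.py. A minimal loading sketch (illustrative only, not code from this upload):

import json

with open("config.json") as f:
    config = json.load(f)

# The sections mirror the engine config that app.py builds inline, plus the extras above.
print(config["dhcp"]["range_start"], "-", config["dhcp"]["range_end"])   # 10.0.0.10 - 10.0.0.100
print([rule["rule_id"] for rule in config["firewall"]["rules"]])         # ['allow_dhcp', 'allow_dns']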
core/__init__.py
ADDED
@@ -0,0 +1,2 @@
# Core networking modules for the virtual ISP stack
core/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (156 Bytes).
core/__pycache__/dhcp_server.cpython-311.pyc
ADDED
Binary file (21.2 kB).
core/__pycache__/firewall.cpython-311.pyc
ADDED
Binary file (27.4 kB).
core/__pycache__/ip_parser.cpython-311.pyc
ADDED
Binary file (23.1 kB).
core/__pycache__/logger.cpython-311.pyc
ADDED
Binary file (29.4 kB).
core/__pycache__/nat_engine.cpython-311.pyc
ADDED
Binary file (29.3 kB).
core/__pycache__/openvpn_manager.cpython-311.pyc
ADDED
Binary file (31.8 kB).
core/__pycache__/packet_bridge.cpython-311.pyc
ADDED
Binary file (34.3 kB).
core/__pycache__/session_tracker.cpython-311.pyc
ADDED
Binary file (33.9 kB).
core/__pycache__/socket_translator.cpython-311.pyc
ADDED
Binary file (32.8 kB).
core/__pycache__/tcp_engine.cpython-311.pyc
ADDED
Binary file (33.1 kB).
core/__pycache__/traffic_router.cpython-311.pyc
ADDED
Binary file (22.6 kB).
core/__pycache__/virtual_router.cpython-311.pyc
ADDED
Binary file (30.7 kB).
core/dhcp_server.py
ADDED
@@ -0,0 +1,391 @@
"""
DHCP Server Module

Implements a user-space DHCP server that handles:
- DHCP DISCOVER → OFFER → REQUEST → ACK sequence
- IP lease management
- Lease renewals and expiration
"""

import struct
import time
import socket
import threading
from typing import Dict, Optional, Tuple
from dataclasses import dataclass
from enum import Enum


class DHCPMessageType(Enum):
    DISCOVER = 1
    OFFER = 2
    REQUEST = 3
    DECLINE = 4
    ACK = 5
    NAK = 6
    RELEASE = 7
    INFORM = 8


@dataclass
class DHCPLease:
    """Represents a DHCP lease"""
    mac_address: str
    ip_address: str
    lease_time: int
    lease_start: float
    state: str = 'BOUND'

    @property
    def is_expired(self) -> bool:
        return time.time() > (self.lease_start + self.lease_time)

    @property
    def remaining_time(self) -> int:
        remaining = int((self.lease_start + self.lease_time) - time.time())
        return max(0, remaining)


class DHCPPacket:
    """DHCP packet parser and builder"""

    def __init__(self):
        self.op = 0                 # Message op code / message type
        self.htype = 1              # Hardware address type (Ethernet = 1)
        self.hlen = 6               # Hardware address length
        self.hops = 0               # Hops
        self.xid = 0                # Transaction ID
        self.secs = 0               # Seconds elapsed
        self.flags = 0              # Flags
        self.ciaddr = '0.0.0.0'     # Client IP address
        self.yiaddr = '0.0.0.0'     # Your IP address
        self.siaddr = '0.0.0.0'     # Server IP address
        self.giaddr = '0.0.0.0'     # Gateway IP address
        self.chaddr = b'\x00' * 16  # Client hardware address
        self.sname = b'\x00' * 64   # Server name
        self.file = b'\x00' * 128   # Boot file name
        self.options = {}           # DHCP options

    @classmethod
    def parse(cls, data: bytes) -> 'DHCPPacket':
        """Parse DHCP packet from raw bytes"""
        packet = cls()

        # Parse fixed fields (first 236 bytes)
        if len(data) < 236:
            raise ValueError("DHCP packet too short")

        fields = struct.unpack('!BBBBIHH4s4s4s4s16s64s128s', data[:236])
        packet.op = fields[0]
        packet.htype = fields[1]
        packet.hlen = fields[2]
        packet.hops = fields[3]
        packet.xid = fields[4]
        packet.secs = fields[5]
        packet.flags = fields[6]
        packet.ciaddr = socket.inet_ntoa(fields[7])
        packet.yiaddr = socket.inet_ntoa(fields[8])
        packet.siaddr = socket.inet_ntoa(fields[9])
        packet.giaddr = socket.inet_ntoa(fields[10])
        packet.chaddr = fields[11]
        packet.sname = fields[12]
        packet.file = fields[13]

        # Parse options (after magic cookie)
        options_data = data[236:]
        if len(options_data) >= 4:
            magic = struct.unpack('!I', options_data[:4])[0]
            if magic == 0x63825363:  # DHCP magic cookie
                packet.options = packet._parse_options(options_data[4:])

        return packet

    def _parse_options(self, data: bytes) -> Dict[int, bytes]:
        """Parse DHCP options"""
        options = {}
        i = 0

        while i < len(data):
            if data[i] == 255:  # End option
                break
            elif data[i] == 0:  # Pad option
                i += 1
                continue

            option_type = data[i]
            if i + 1 >= len(data):
                break

            option_length = data[i + 1]
            if i + 2 + option_length > len(data):
                break

            option_data = data[i + 2:i + 2 + option_length]
            options[option_type] = option_data
            i += 2 + option_length

        return options

    def build(self) -> bytes:
        """Build DHCP packet as bytes"""
        # Build fixed fields
        packet_data = struct.pack(
            '!BBBBIHH4s4s4s4s16s64s128s',
            self.op, self.htype, self.hlen, self.hops,
            self.xid, self.secs, self.flags,
            socket.inet_aton(self.ciaddr),
            socket.inet_aton(self.yiaddr),
            socket.inet_aton(self.siaddr),
            socket.inet_aton(self.giaddr),
            self.chaddr, self.sname, self.file
        )

        # Add magic cookie
        packet_data += struct.pack('!I', 0x63825363)

        # Add options
        for option_type, option_data in self.options.items():
            packet_data += struct.pack('!BB', option_type, len(option_data))
            packet_data += option_data

        # Add end option
        packet_data += b'\xff'

        # Pad to minimum size
        while len(packet_data) < 300:
            packet_data += b'\x00'

        return packet_data

    def get_mac_address(self) -> str:
        """Get client MAC address as string"""
        return ':'.join(f'{b:02x}' for b in self.chaddr[:6])

    def get_message_type(self) -> Optional[DHCPMessageType]:
        """Get DHCP message type from options"""
        if 53 in self.options and len(self.options[53]) == 1:
            msg_type = self.options[53][0]
            try:
                return DHCPMessageType(msg_type)
            except ValueError:
                return None
        return None


class DHCPServer:
    """User-space DHCP server implementation"""

    def __init__(self, config: Dict):
        self.config = config
        self.leases: Dict[str, DHCPLease] = {}  # MAC -> Lease
        self.ip_pool = self._build_ip_pool()
        self.running = False
        self.server_thread = None
        self.lock = threading.Lock()

    def _build_ip_pool(self) -> set:
        """Build available IP address pool"""
        network = self.config['network']
        start_ip = self.config['range_start']
        end_ip = self.config['range_end']

        # Convert IP addresses to integers for range calculation
        start_int = struct.unpack('!I', socket.inet_aton(start_ip))[0]
        end_int = struct.unpack('!I', socket.inet_aton(end_ip))[0]

        pool = set()
        for ip_int in range(start_int, end_int + 1):
            ip_str = socket.inet_ntoa(struct.pack('!I', ip_int))
            pool.add(ip_str)

        return pool

    def _get_available_ip(self) -> Optional[str]:
        """Get next available IP address"""
        with self.lock:
            # Remove expired leases
            self._cleanup_expired_leases()

            # Find available IP
            used_ips = {lease.ip_address for lease in self.leases.values()}
            available_ips = self.ip_pool - used_ips

            if available_ips:
                return min(available_ips)  # Return lowest available IP
            return None

    def _cleanup_expired_leases(self):
        """Remove expired leases"""
        expired_macs = [
            mac for mac, lease in self.leases.items()
            if lease.is_expired
        ]
        for mac in expired_macs:
            del self.leases[mac]

    def _create_dhcp_offer(self, discover_packet: DHCPPacket) -> DHCPPacket:
        """Create DHCP OFFER response"""
        mac_address = discover_packet.get_mac_address()

        # Check for existing lease
        if mac_address in self.leases and not self.leases[mac_address].is_expired:
            offered_ip = self.leases[mac_address].ip_address
        else:
            offered_ip = self._get_available_ip()
            if not offered_ip:
                return None  # No available IPs

        # Create OFFER packet
        offer = DHCPPacket()
        offer.op = 2  # BOOTREPLY
        offer.htype = discover_packet.htype
        offer.hlen = discover_packet.hlen
        offer.xid = discover_packet.xid
        offer.yiaddr = offered_ip
        offer.siaddr = self.config['gateway']
        offer.chaddr = discover_packet.chaddr

        # Add DHCP options
        offer.options[53] = bytes([DHCPMessageType.OFFER.value])  # Message type
        offer.options[1] = socket.inet_aton('255.255.255.0')  # Subnet mask
        offer.options[3] = socket.inet_aton(self.config['gateway'])  # Router
        offer.options[6] = b''.join(socket.inet_aton(dns) for dns in self.config['dns_servers'])  # DNS
        offer.options[51] = struct.pack('!I', self.config['lease_time'])  # Lease time
        offer.options[54] = socket.inet_aton(self.config['gateway'])  # DHCP server identifier

        return offer

    def _create_dhcp_ack(self, request_packet: DHCPPacket) -> DHCPPacket:
        """Create DHCP ACK response"""
        mac_address = request_packet.get_mac_address()
        requested_ip = request_packet.ciaddr

        # If no requested IP in ciaddr, check option 50
        if requested_ip == '0.0.0.0' and 50 in request_packet.options:
            requested_ip = socket.inet_ntoa(request_packet.options[50])

        # Validate request
        if not self._validate_request(mac_address, requested_ip):
            return self._create_dhcp_nak(request_packet)

        # Create or update lease
        lease = DHCPLease(
            mac_address=mac_address,
            ip_address=requested_ip,
            lease_time=self.config['lease_time'],
            lease_start=time.time()
        )

        with self.lock:
            self.leases[mac_address] = lease

        # Create ACK packet
        ack = DHCPPacket()
        ack.op = 2  # BOOTREPLY
        ack.htype = request_packet.htype
        ack.hlen = request_packet.hlen
        ack.xid = request_packet.xid
        ack.yiaddr = requested_ip
        ack.siaddr = self.config['gateway']
        ack.chaddr = request_packet.chaddr

        # Add DHCP options
        ack.options[53] = bytes([DHCPMessageType.ACK.value])  # Message type
        ack.options[1] = socket.inet_aton('255.255.255.0')  # Subnet mask
        ack.options[3] = socket.inet_aton(self.config['gateway'])  # Router
        ack.options[6] = b''.join(socket.inet_aton(dns) for dns in self.config['dns_servers'])  # DNS
        ack.options[51] = struct.pack('!I', self.config['lease_time'])  # Lease time
        ack.options[54] = socket.inet_aton(self.config['gateway'])  # DHCP server identifier

        return ack

    def _create_dhcp_nak(self, request_packet: DHCPPacket) -> DHCPPacket:
        """Create DHCP NAK response"""
        nak = DHCPPacket()
        nak.op = 2  # BOOTREPLY
        nak.htype = request_packet.htype
        nak.hlen = request_packet.hlen
        nak.xid = request_packet.xid
        nak.chaddr = request_packet.chaddr

        # Add DHCP options
        nak.options[53] = bytes([DHCPMessageType.NAK.value])  # Message type
        nak.options[54] = socket.inet_aton(self.config['gateway'])  # DHCP server identifier

        return nak

    def _validate_request(self, mac_address: str, requested_ip: str) -> bool:
        """Validate DHCP request"""
        # Check if IP is in our pool
        if requested_ip not in self.ip_pool:
            return False

        # Check if IP is available or already assigned to this MAC
        with self.lock:
            for mac, lease in self.leases.items():
                if lease.ip_address == requested_ip:
                    if mac != mac_address and not lease.is_expired:
                        return False  # IP already assigned to different MAC

        return True

    def process_packet(self, packet_data: bytes, client_addr: Tuple[str, int]) -> Optional[bytes]:
        """Process incoming DHCP packet and return response"""
        try:
            packet = DHCPPacket.parse(packet_data)
            message_type = packet.get_message_type()

            if message_type == DHCPMessageType.DISCOVER:
                response = self._create_dhcp_offer(packet)
            elif message_type == DHCPMessageType.REQUEST:
                response = self._create_dhcp_ack(packet)
            elif message_type == DHCPMessageType.RELEASE:
                # Handle lease release
                mac_address = packet.get_mac_address()
                with self.lock:
                    if mac_address in self.leases:
                        del self.leases[mac_address]
                return None
            else:
                return None

            if response:
                return response.build()

        except Exception as e:
            print(f"Error processing DHCP packet: {e}")
        return None

    def get_leases(self) -> Dict[str, Dict]:
        """Get current lease table"""
        with self.lock:
            self._cleanup_expired_leases()
            return {
                mac: {
                    'ip_address': lease.ip_address,
                    'lease_time': lease.lease_time,
                    'lease_start': lease.lease_start,
                    'remaining_time': lease.remaining_time,
                    'state': lease.state
                }
                for mac, lease in self.leases.items()
            }

    def release_lease(self, mac_address: str) -> bool:
        """Manually release a lease"""
        with self.lock:
            if mac_address in self.leases:
                del self.leases[mac_address]
                return True
        return False

    def start(self):
        """Start DHCP server (placeholder for integration with packet bridge)"""
        self.running = True
        print(f"DHCP server started - Pool: {self.config['range_start']} - {self.config['range_end']}")

    def stop(self):
        """Stop DHCP server"""
        self.running = False
        print("DHCP server stopped")
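The DHCP classes above can be exercised in isolation, without the packet bridge: build a DISCOVER with DHCPPacket, feed its bytes to DHCPServer.process_packet(), and parse the OFFER that comes back. A minimal sketch, assuming the repository root is on the import path; the client MAC and transaction ID are made up:

from core.dhcp_server import DHCPMessageType, DHCPPacket, DHCPServer

# Config mirrors the "dhcp" section of config.json
server = DHCPServer({
    "network": "10.0.0.0/24",
    "range_start": "10.0.0.10",
    "range_end": "10.0.0.100",
    "lease_time": 3600,
    "gateway": "10.0.0.1",
    "dns_servers": ["8.8.8.8", "8.8.4.4"],
})

# Hand-built DISCOVER for a fake client
discover = DHCPPacket()
discover.op = 1                                   # BOOTREQUEST
discover.xid = 0x12345678
discover.chaddr = bytes.fromhex("aabbccddeeff") + b"\x00" * 10
discover.options[53] = bytes([DHCPMessageType.DISCOVER.value])

offer_bytes = server.process_packet(discover.build(), ("0.0.0.0", 68))
offer = DHCPPacket.parse(offer_bytes)
print(offer.yiaddr)   # lowest free address in the pool, e.g. 10.0.0.10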
core/firewall.py
ADDED
@@ -0,0 +1,523 @@
"""
Firewall Module

Implements packet filtering and access control:
- Rule-based packet filtering (allow/block by IP, port, protocol)
- Ordered rule processing
- Logging and statistics
- Dynamic rule management via API
"""

import time
import threading
import ipaddress
import re
from typing import Dict, List, Optional, Tuple, Any
from dataclasses import dataclass
from enum import Enum

from .ip_parser import ParsedPacket, TCPHeader, UDPHeader


class FirewallAction(Enum):
    ACCEPT = "ACCEPT"
    DROP = "DROP"
    REJECT = "REJECT"


class FirewallDirection(Enum):
    INBOUND = "INBOUND"
    OUTBOUND = "OUTBOUND"
    BOTH = "BOTH"


@dataclass
class FirewallRule:
    """Represents a firewall rule"""
    rule_id: str
    priority: int  # Lower number = higher priority
    action: FirewallAction
    direction: FirewallDirection

    # Match criteria
    source_ip: Optional[str] = None    # IP or CIDR
    dest_ip: Optional[str] = None      # IP or CIDR
    source_port: Optional[str] = None  # Port or range (e.g., "80", "80-90", "80,443")
    dest_port: Optional[str] = None    # Port or range
    protocol: Optional[str] = None     # TCP, UDP, ICMP, or None for any

    # Metadata
    description: str = ""
    enabled: bool = True
    created_time: float = 0
    hit_count: int = 0
    last_hit: Optional[float] = None

    def __post_init__(self):
        if self.created_time == 0:
            self.created_time = time.time()

    def record_hit(self):
        """Record a rule hit"""
        self.hit_count += 1
        self.last_hit = time.time()

    def to_dict(self) -> Dict:
        """Convert rule to dictionary"""
        return {
            'rule_id': self.rule_id,
            'priority': self.priority,
            'action': self.action.value,
            'direction': self.direction.value,
            'source_ip': self.source_ip,
            'dest_ip': self.dest_ip,
            'source_port': self.source_port,
            'dest_port': self.dest_port,
            'protocol': self.protocol,
            'description': self.description,
            'enabled': self.enabled,
            'created_time': self.created_time,
            'hit_count': self.hit_count,
            'last_hit': self.last_hit
        }


@dataclass
class FirewallLogEntry:
    """Represents a firewall log entry"""
    timestamp: float
    action: str
    rule_id: Optional[str]
    source_ip: str
    dest_ip: str
    source_port: int
    dest_port: int
    protocol: str
    packet_size: int
    reason: str = ""

    def to_dict(self) -> Dict:
        """Convert log entry to dictionary"""
        return {
            'timestamp': self.timestamp,
            'action': self.action,
            'rule_id': self.rule_id,
            'source_ip': self.source_ip,
            'dest_ip': self.dest_ip,
            'source_port': self.source_port,
            'dest_port': self.dest_port,
            'protocol': self.protocol,
            'packet_size': self.packet_size,
            'reason': self.reason
        }


class FirewallEngine:
    """Firewall engine implementation"""

    def __init__(self, config: Dict):
        self.config = config
        self.rules: Dict[str, FirewallRule] = {}
        self.logs: List[FirewallLogEntry] = []
        self.lock = threading.Lock()

        # Configuration
        self.default_policy = FirewallAction(config.get('default_policy', 'ACCEPT'))
        self.log_blocked = config.get('log_blocked', True)
        self.log_accepted = config.get('log_accepted', False)
        self.max_log_entries = config.get('max_log_entries', 10000)

        # Statistics
        self.stats = {
            'packets_processed': 0,
            'packets_accepted': 0,
            'packets_dropped': 0,
            'packets_rejected': 0,
            'rules_hit': 0,
            'default_policy_hits': 0
        }

        # Load initial rules
        initial_rules = config.get('rules', [])
        for rule_config in initial_rules:
            self._add_rule_from_config(rule_config)

    def _add_rule_from_config(self, rule_config: Dict):
        """Add rule from configuration"""
        rule = FirewallRule(
            rule_id=rule_config['rule_id'],
            priority=rule_config.get('priority', 100),
            action=FirewallAction(rule_config['action']),
            direction=FirewallDirection(rule_config.get('direction', 'BOTH')),
            source_ip=rule_config.get('source_ip'),
            dest_ip=rule_config.get('dest_ip'),
            source_port=rule_config.get('source_port'),
            dest_port=rule_config.get('dest_port'),
            protocol=rule_config.get('protocol'),
            description=rule_config.get('description', ''),
            enabled=rule_config.get('enabled', True)
        )

        with self.lock:
            self.rules[rule.rule_id] = rule

    def _match_ip(self, ip: str, pattern: str) -> bool:
        """Match IP address against pattern (IP or CIDR)"""
        try:
            if '/' in pattern:
                # CIDR notation
                network = ipaddress.ip_network(pattern, strict=False)
                return ipaddress.ip_address(ip) in network
            else:
                # Exact IP match
                return ip == pattern
        except (ipaddress.AddressValueError, ValueError):
            return False

    def _match_port(self, port: int, pattern: str) -> bool:
        """Match port against pattern (port, range, or list)"""
        try:
            if ',' in pattern:
                # List of ports: "80,443,8080"
                ports = [int(p.strip()) for p in pattern.split(',')]
                return port in ports
            elif '-' in pattern:
                # Port range: "80-90"
                start, end = map(int, pattern.split('-', 1))
                return start <= port <= end
            else:
                # Single port: "80"
                return port == int(pattern)
        except (ValueError, TypeError):
            return False

    def _match_protocol(self, protocol: str, pattern: str) -> bool:
        """Match protocol against pattern"""
        if pattern is None:
            return True  # Match any protocol
        return protocol.upper() == pattern.upper()

    def _evaluate_rule(self, rule: FirewallRule, packet: ParsedPacket, direction: FirewallDirection) -> bool:
        """Evaluate if a rule matches a packet"""
        if not rule.enabled:
            return False

        # Check direction
        if rule.direction != FirewallDirection.BOTH and rule.direction != direction:
            return False

        # Check source IP
        if rule.source_ip and not self._match_ip(packet.ip_header.source_ip, rule.source_ip):
            return False

        # Check destination IP
        if rule.dest_ip and not self._match_ip(packet.ip_header.dest_ip, rule.dest_ip):
            return False

        # Check protocol
        if packet.transport_header:
            if isinstance(packet.transport_header, TCPHeader):
                protocol = 'TCP'
                source_port = packet.transport_header.source_port
                dest_port = packet.transport_header.dest_port
            elif isinstance(packet.transport_header, UDPHeader):
                protocol = 'UDP'
                source_port = packet.transport_header.source_port
                dest_port = packet.transport_header.dest_port
            else:
                protocol = 'OTHER'
                source_port = 0
                dest_port = 0
        else:
            protocol = 'OTHER'
            source_port = 0
            dest_port = 0

        if not self._match_protocol(protocol, rule.protocol):
            return False

        # Check source port
        if rule.source_port and not self._match_port(source_port, rule.source_port):
            return False

        # Check destination port
        if rule.dest_port and not self._match_port(dest_port, rule.dest_port):
            return False

        return True

    def _log_packet(self, action: str, packet: ParsedPacket, rule_id: Optional[str] = None, reason: str = ""):
        """Log packet processing"""
        if not (self.log_blocked or self.log_accepted):
            return

        # Only log if configured
        if action == 'ACCEPT' and not self.log_accepted:
            return
        if action in ['DROP', 'REJECT'] and not self.log_blocked:
            return

        # Extract packet information
        if packet.transport_header:
            if isinstance(packet.transport_header, (TCPHeader, UDPHeader)):
                source_port = packet.transport_header.source_port
                dest_port = packet.transport_header.dest_port
                protocol = 'TCP' if isinstance(packet.transport_header, TCPHeader) else 'UDP'
            else:
                source_port = 0
                dest_port = 0
                protocol = 'OTHER'
        else:
            source_port = 0
            dest_port = 0
            protocol = 'OTHER'

        log_entry = FirewallLogEntry(
            timestamp=time.time(),
            action=action,
            rule_id=rule_id,
            source_ip=packet.ip_header.source_ip,
            dest_ip=packet.ip_header.dest_ip,
            source_port=source_port,
            dest_port=dest_port,
            protocol=protocol,
            packet_size=len(packet.raw_packet),
            reason=reason
        )

        with self.lock:
            self.logs.append(log_entry)

            # Trim logs if too many
            if len(self.logs) > self.max_log_entries:
                self.logs = self.logs[-self.max_log_entries:]

    def process_packet(self, packet: ParsedPacket, direction: FirewallDirection) -> FirewallAction:
        """Process packet through firewall rules"""
        self.stats['packets_processed'] += 1

        # Get sorted rules by priority
        with self.lock:
            sorted_rules = sorted(self.rules.values(), key=lambda r: r.priority)

        # Evaluate rules in order
        for rule in sorted_rules:
            if self._evaluate_rule(rule, packet, direction):
                rule.record_hit()
                self.stats['rules_hit'] += 1

                # Log the action
                self._log_packet(rule.action.value, packet, rule.rule_id, f"Matched rule: {rule.description}")

                # Update statistics
                if rule.action == FirewallAction.ACCEPT:
                    self.stats['packets_accepted'] += 1
                elif rule.action == FirewallAction.DROP:
                    self.stats['packets_dropped'] += 1
                elif rule.action == FirewallAction.REJECT:
                    self.stats['packets_rejected'] += 1

                return rule.action

        # No rule matched, apply default policy
        self.stats['default_policy_hits'] += 1
        self._log_packet(self.default_policy.value, packet, None, "Default policy")

        if self.default_policy == FirewallAction.ACCEPT:
            self.stats['packets_accepted'] += 1
        elif self.default_policy == FirewallAction.DROP:
            self.stats['packets_dropped'] += 1
        elif self.default_policy == FirewallAction.REJECT:
            self.stats['packets_rejected'] += 1

        return self.default_policy

    def add_rule(self, rule: FirewallRule) -> bool:
        """Add firewall rule"""
        with self.lock:
            if rule.rule_id in self.rules:
                return False
            self.rules[rule.rule_id] = rule
            return True

    def remove_rule(self, rule_id: str) -> bool:
        """Remove firewall rule"""
        with self.lock:
            if rule_id in self.rules:
                del self.rules[rule_id]
                return True
            return False

    def update_rule(self, rule_id: str, **kwargs) -> bool:
        """Update firewall rule"""
        with self.lock:
            if rule_id not in self.rules:
                return False

            rule = self.rules[rule_id]
            for key, value in kwargs.items():
                if hasattr(rule, key):
                    if key in ['action', 'direction']:
                        # Handle enum values
                        if key == 'action':
                            value = FirewallAction(value)
                        elif key == 'direction':
                            value = FirewallDirection(value)
                    setattr(rule, key, value)

            return True

    def enable_rule(self, rule_id: str) -> bool:
        """Enable firewall rule"""
        return self.update_rule(rule_id, enabled=True)

    def disable_rule(self, rule_id: str) -> bool:
        """Disable firewall rule"""
        return self.update_rule(rule_id, enabled=False)

    def get_rules(self) -> List[Dict]:
        """Get all firewall rules"""
        with self.lock:
            return [rule.to_dict() for rule in sorted(self.rules.values(), key=lambda r: r.priority)]

    def get_rule(self, rule_id: str) -> Optional[Dict]:
        """Get specific firewall rule"""
        with self.lock:
            rule = self.rules.get(rule_id)
            return rule.to_dict() if rule else None

    def get_logs(self, limit: int = 100, filter_action: Optional[str] = None) -> List[Dict]:
        """Get firewall logs"""
        with self.lock:
            logs = self.logs.copy()

        # Filter by action if specified
        if filter_action:
            logs = [log for log in logs if log.action == filter_action.upper()]

        # Return most recent logs
        return [log.to_dict() for log in logs[-limit:]]

    def clear_logs(self):
        """Clear firewall logs"""
        with self.lock:
            self.logs.clear()

    def get_stats(self) -> Dict:
        """Get firewall statistics"""
        with self.lock:
            stats = self.stats.copy()
            stats['total_rules'] = len(self.rules)
            stats['enabled_rules'] = sum(1 for rule in self.rules.values() if rule.enabled)
            stats['log_entries'] = len(self.logs)
            stats['default_policy'] = self.default_policy.value

        return stats

    def reset_stats(self):
        """Reset firewall statistics"""
        self.stats = {
            'packets_processed': 0,
            'packets_accepted': 0,
            'packets_dropped': 0,
            'packets_rejected': 0,
            'rules_hit': 0,
            'default_policy_hits': 0
        }

        # Reset rule hit counts
        with self.lock:
            for rule in self.rules.values():
                rule.hit_count = 0
                rule.last_hit = None

    def set_default_policy(self, policy: str):
        """Set default firewall policy"""
        self.default_policy = FirewallAction(policy.upper())

    def export_rules(self) -> List[Dict]:
        """Export rules for backup/configuration"""
        return self.get_rules()

    def import_rules(self, rules_config: List[Dict], replace: bool = False):
        """Import rules from configuration"""
        if replace:
            with self.lock:
                self.rules.clear()

        for rule_config in rules_config:
            self._add_rule_from_config(rule_config)


class FirewallRuleBuilder:
    """Helper class to build firewall rules"""

    def __init__(self, rule_id: str):
        self.rule_id = rule_id
        self.priority = 100
        self.action = FirewallAction.ACCEPT
        self.direction = FirewallDirection.BOTH
        self.source_ip = None
        self.dest_ip = None
        self.source_port = None
        self.dest_port = None
        self.protocol = None
        self.description = ""
        self.enabled = True

    def set_priority(self, priority: int) -> 'FirewallRuleBuilder':
        self.priority = priority
        return self

    def set_action(self, action: str) -> 'FirewallRuleBuilder':
        self.action = FirewallAction(action.upper())
        return self

    def set_direction(self, direction: str) -> 'FirewallRuleBuilder':
        self.direction = FirewallDirection(direction.upper())
        return self

    def set_source_ip(self, ip: str) -> 'FirewallRuleBuilder':
        self.source_ip = ip
        return self

    def set_dest_ip(self, ip: str) -> 'FirewallRuleBuilder':
        self.dest_ip = ip
        return self

    def set_source_port(self, port: str) -> 'FirewallRuleBuilder':
        self.source_port = port
        return self

    def set_dest_port(self, port: str) -> 'FirewallRuleBuilder':
        self.dest_port = port
        return self

    def set_protocol(self, protocol: str) -> 'FirewallRuleBuilder':
        self.protocol = protocol.upper()
        return self

    def set_description(self, description: str) -> 'FirewallRuleBuilder':
        self.description = description
        return self

    def set_enabled(self, enabled: bool) -> 'FirewallRuleBuilder':
        self.enabled = enabled
        return self

    def build(self) -> FirewallRule:
        """Build the firewall rule"""
        return FirewallRule(
            rule_id=self.rule_id,
            priority=self.priority,
            action=self.action,
            direction=self.direction,
            source_ip=self.source_ip,
            dest_ip=self.dest_ip,
            source_port=self.source_port,
            dest_port=self.dest_port,
            protocol=self.protocol,
            description=self.description,
            enabled=self.enabled
        )
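FirewallEngine and FirewallRuleBuilder can likewise be driven directly for rule management; evaluating actual traffic additionally needs a ParsedPacket from core/ip_parser.py, shown next. A minimal sketch, assuming the repository root is on the import path:

from core.firewall import FirewallEngine, FirewallRuleBuilder

# Config mirrors the "firewall" section of config.json (one seed rule kept for brevity)
fw = FirewallEngine({
    "default_policy": "ACCEPT",
    "log_blocked": True,
    "max_log_entries": 10000,
    "rules": [
        {"rule_id": "allow_dns", "priority": 2, "action": "ACCEPT", "direction": "BOTH",
         "dest_port": "53", "protocol": "UDP", "description": "Allow DNS traffic", "enabled": True},
    ],
})

# Add a blocking rule with the fluent builder and inspect the rule table
block_telnet = (FirewallRuleBuilder("block_telnet")
                .set_priority(10)
                .set_action("DROP")
                .set_direction("INBOUND")
                .set_dest_port("23")
                .set_protocol("TCP")
                .set_description("Drop inbound telnet")
                .build())
fw.add_rule(block_telnet)

print([r["rule_id"] for r in fw.get_rules()])   # ['allow_dns', 'block_telnet'] (sorted by priority)
print(fw.get_stats()["total_rules"])            # 2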
core/ip_parser.py
ADDED
@@ -0,0 +1,546 @@
1 |
+
"""
|
2 |
+
IP Parser/Assembler Module
|
3 |
+
|
4 |
+
Handles IPv4 packet parsing and construction:
|
5 |
+
- Parse IPv4, UDP, and TCP headers
|
6 |
+
- Calculate and verify checksums
|
7 |
+
- Handle packet fragmentation and reassembly
|
8 |
+
- Support various IP options
|
9 |
+
"""
|
10 |
+
|
11 |
+
import struct
|
12 |
+
import socket
|
13 |
+
from typing import Dict, List, Optional, Tuple
|
14 |
+
from dataclasses import dataclass
|
15 |
+
from enum import Enum
|
16 |
+
|
17 |
+
|
18 |
+
class IPProtocol(Enum):
|
19 |
+
ICMP = 1
|
20 |
+
TCP = 6
|
21 |
+
UDP = 17
|
22 |
+
|
23 |
+
|
24 |
+
@dataclass
|
25 |
+
class IPv4Header:
|
26 |
+
"""IPv4 header structure"""
|
27 |
+
version: int = 4
|
28 |
+
ihl: int = 5 # Internet Header Length (in 32-bit words)
|
29 |
+
tos: int = 0 # Type of Service
|
30 |
+
total_length: int = 0
|
31 |
+
identification: int = 0
|
32 |
+
flags: int = 0 # 3 bits: Reserved, Don't Fragment, More Fragments
|
33 |
+
fragment_offset: int = 0 # 13 bits
|
34 |
+
ttl: int = 64 # Time to Live
|
35 |
+
protocol: int = 0
|
36 |
+
header_checksum: int = 0
|
37 |
+
source_ip: str = '0.0.0.0'
|
38 |
+
dest_ip: str = '0.0.0.0'
|
39 |
+
options: bytes = b''
|
40 |
+
|
41 |
+
@property
|
42 |
+
def header_length(self) -> int:
|
43 |
+
"""Get header length in bytes"""
|
44 |
+
return self.ihl * 4
|
45 |
+
|
46 |
+
@property
|
47 |
+
def dont_fragment(self) -> bool:
|
48 |
+
"""Check if Don't Fragment flag is set"""
|
49 |
+
return bool(self.flags & 0x2)
|
50 |
+
|
51 |
+
@property
|
52 |
+
def more_fragments(self) -> bool:
|
53 |
+
"""Check if More Fragments flag is set"""
|
54 |
+
return bool(self.flags & 0x1)
|
55 |
+
|
56 |
+
@property
|
57 |
+
def is_fragment(self) -> bool:
|
58 |
+
"""Check if this is a fragment"""
|
59 |
+
return self.more_fragments or self.fragment_offset > 0
|
60 |
+
|
61 |
+
|
62 |
+
@dataclass
|
63 |
+
class TCPHeader:
|
64 |
+
"""TCP header structure"""
|
65 |
+
source_port: int = 0
|
66 |
+
dest_port: int = 0
|
67 |
+
seq_num: int = 0
|
68 |
+
ack_num: int = 0
|
69 |
+
data_offset: int = 5 # Header length in 32-bit words
|
70 |
+
reserved: int = 0
|
71 |
+
flags: int = 0 # 9 bits: NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN
|
72 |
+
window_size: int = 65535
|
73 |
+
checksum: int = 0
|
74 |
+
urgent_pointer: int = 0
|
75 |
+
options: bytes = b''
|
76 |
+
|
77 |
+
@property
|
78 |
+
def header_length(self) -> int:
|
79 |
+
"""Get header length in bytes"""
|
80 |
+
return self.data_offset * 4
|
81 |
+
|
82 |
+
# TCP Flag properties
|
83 |
+
@property
|
84 |
+
def fin(self) -> bool:
|
85 |
+
return bool(self.flags & 0x01)
|
86 |
+
|
87 |
+
@property
|
88 |
+
def syn(self) -> bool:
|
89 |
+
return bool(self.flags & 0x02)
|
90 |
+
|
91 |
+
@property
|
92 |
+
def rst(self) -> bool:
|
93 |
+
return bool(self.flags & 0x04)
|
94 |
+
|
95 |
+
@property
|
96 |
+
def psh(self) -> bool:
|
97 |
+
return bool(self.flags & 0x08)
|
98 |
+
|
99 |
+
@property
|
100 |
+
def ack(self) -> bool:
|
101 |
+
return bool(self.flags & 0x10)
|
102 |
+
|
103 |
+
@property
|
104 |
+
def urg(self) -> bool:
|
105 |
+
return bool(self.flags & 0x20)
|
106 |
+
|
107 |
+
def set_flag(self, flag_name: str, value: bool = True):
|
108 |
+
"""Set TCP flag"""
|
109 |
+
flag_bits = {
|
110 |
+
'fin': 0x01, 'syn': 0x02, 'rst': 0x04, 'psh': 0x08,
|
111 |
+
'ack': 0x10, 'urg': 0x20, 'ece': 0x40, 'cwr': 0x80, 'ns': 0x100
|
112 |
+
}
|
113 |
+
|
114 |
+
if flag_name.lower() in flag_bits:
|
115 |
+
bit = flag_bits[flag_name.lower()]
|
116 |
+
if value:
|
117 |
+
self.flags |= bit
|
118 |
+
else:
|
119 |
+
self.flags &= ~bit
|
120 |
+
|
121 |
+
|
122 |
+
@dataclass
|
123 |
+
class UDPHeader:
|
124 |
+
"""UDP header structure"""
|
125 |
+
source_port: int = 0
|
126 |
+
dest_port: int = 0
|
127 |
+
length: int = 8 # Header + data length
|
128 |
+
checksum: int = 0
|
129 |
+
|
130 |
+
@property
|
131 |
+
def header_length(self) -> int:
|
132 |
+
"""Get header length in bytes (always 8 for UDP)"""
|
133 |
+
return 8
|
134 |
+
|
135 |
+
|
136 |
+
@dataclass
|
137 |
+
class ParsedPacket:
|
138 |
+
"""Parsed packet structure"""
|
139 |
+
ip_header: IPv4Header
|
140 |
+
transport_header: Optional[object] = None # TCPHeader or UDPHeader
|
141 |
+
payload: bytes = b''
|
142 |
+
raw_packet: bytes = b''
|
143 |
+
|
144 |
+
|
145 |
+
class IPParser:
|
146 |
+
"""IPv4 packet parser and assembler"""
|
147 |
+
|
148 |
+
@staticmethod
|
149 |
+
def calculate_checksum(data: bytes) -> int:
|
150 |
+
"""Calculate Internet checksum"""
|
151 |
+
# Pad data to even length
|
152 |
+
if len(data) % 2:
|
153 |
+
data += b'\x00'
|
154 |
+
|
155 |
+
checksum = 0
|
156 |
+
for i in range(0, len(data), 2):
|
157 |
+
word = (data[i] << 8) + data[i + 1]
|
158 |
+
checksum += word
|
159 |
+
|
160 |
+
# Add carry bits
|
161 |
+
while checksum >> 16:
|
162 |
+
checksum = (checksum & 0xFFFF) + (checksum >> 16)
|
163 |
+
|
164 |
+
# One's complement
|
165 |
+
return (~checksum) & 0xFFFF
|
166 |
+
|
167 |
+
@staticmethod
|
168 |
+
def verify_checksum(data: bytes, checksum: int) -> bool:
|
169 |
+
"""Verify Internet checksum"""
|
170 |
+
calculated = IPParser.calculate_checksum(data)
|
171 |
+
return calculated == checksum or (calculated + checksum) == 0xFFFF
|
172 |
+
|
173 |
+
@classmethod
|
174 |
+
def parse_ipv4_header(cls, data: bytes) -> Tuple[IPv4Header, int]:
|
175 |
+
"""Parse IPv4 header from raw bytes"""
|
176 |
+
if len(data) < 20:
|
177 |
+
raise ValueError("IPv4 header too short")
|
178 |
+
|
179 |
+
# Parse fixed part of header
|
180 |
+
header_data = struct.unpack('!BBHHHBBH4s4s', data[:20])
|
181 |
+
|
182 |
+
header = IPv4Header()
|
183 |
+
version_ihl = header_data[0]
|
184 |
+
header.version = (version_ihl >> 4) & 0xF
|
185 |
+
header.ihl = version_ihl & 0xF
|
186 |
+
header.tos = header_data[1]
|
187 |
+
header.total_length = header_data[2]
|
188 |
+
header.identification = header_data[3]
|
189 |
+
flags_fragment = header_data[4]
|
190 |
+
header.flags = (flags_fragment >> 13) & 0x7
|
191 |
+
header.fragment_offset = flags_fragment & 0x1FFF
|
192 |
+
header.ttl = header_data[5]
|
193 |
+
header.protocol = header_data[6]
|
194 |
+
header.header_checksum = header_data[7]
|
195 |
+
header.source_ip = socket.inet_ntoa(header_data[8])
|
196 |
+
header.dest_ip = socket.inet_ntoa(header_data[9])
|
197 |
+
|
198 |
+
# Validate version
|
199 |
+
if header.version != 4:
|
200 |
+
raise ValueError(f"Unsupported IP version: {header.version}")
|
201 |
+
|
202 |
+
# Parse options if present
|
203 |
+
options_length = header.header_length - 20
|
204 |
+
if options_length > 0:
|
205 |
+
if len(data) < 20 + options_length:
|
206 |
+
raise ValueError("IPv4 options truncated")
|
207 |
+
header.options = data[20:20 + options_length]
|
208 |
+
|
209 |
+
return header, header.header_length
|
210 |
+
|
211 |
+
@classmethod
|
212 |
+
def parse_tcp_header(cls, data: bytes) -> Tuple[TCPHeader, int]:
|
213 |
+
"""Parse TCP header from raw bytes"""
|
214 |
+
if len(data) < 20:
|
215 |
+
raise ValueError("TCP header too short")
|
216 |
+
|
217 |
+
# Parse fixed part of header
|
218 |
+
header_data = struct.unpack('!HHIIBBHHH', data[:20])
|
219 |
+
|
220 |
+
header = TCPHeader()
|
221 |
+
header.source_port = header_data[0]
|
222 |
+
header.dest_port = header_data[1]
|
223 |
+
header.seq_num = header_data[2]
|
224 |
+
header.ack_num = header_data[3]
|
225 |
+
offset_reserved = header_data[4]
|
226 |
+
header.data_offset = (offset_reserved >> 4) & 0xF
|
227 |
+
header.reserved = (offset_reserved >> 1) & 0x7
|
228 |
+
header.flags = ((offset_reserved & 0x1) << 8) | header_data[5]
|
229 |
+
header.window_size = header_data[6]
|
230 |
+
header.checksum = header_data[7]
|
231 |
+
header.urgent_pointer = header_data[8]
|
232 |
+
|
233 |
+
# Parse options if present
|
234 |
+
options_length = header.header_length - 20
|
235 |
+
if options_length > 0:
|
236 |
+
if len(data) < 20 + options_length:
|
237 |
+
raise ValueError("TCP options truncated")
|
238 |
+
header.options = data[20:20 + options_length]
|
239 |
+
|
240 |
+
return header, header.header_length
|
241 |
+
|
242 |
+
@classmethod
|
243 |
+
def parse_udp_header(cls, data: bytes) -> Tuple[UDPHeader, int]:
|
244 |
+
"""Parse UDP header from raw bytes"""
|
245 |
+
if len(data) < 8:
|
246 |
+
raise ValueError("UDP header too short")
|
247 |
+
|
248 |
+
header_data = struct.unpack('!HHHH', data[:8])
|
249 |
+
|
250 |
+
header = UDPHeader()
|
251 |
+
header.source_port = header_data[0]
|
252 |
+
header.dest_port = header_data[1]
|
253 |
+
header.length = header_data[2]
|
254 |
+
header.checksum = header_data[3]
|
255 |
+
|
256 |
+
return header, 8
|
257 |
+
|
258 |
+
@classmethod
|
259 |
+
def parse_packet(cls, data: bytes) -> ParsedPacket:
|
260 |
+
"""Parse complete packet"""
|
261 |
+
packet = ParsedPacket(raw_packet=data)
|
262 |
+
|
263 |
+
# Parse IP header
|
264 |
+
packet.ip_header, ip_header_len = cls.parse_ipv4_header(data)
|
265 |
+
|
266 |
+
# Extract payload after IP header
|
267 |
+
ip_payload = data[ip_header_len:packet.ip_header.total_length]
|
268 |
+
|
269 |
+
# Parse transport layer header
|
270 |
+
if packet.ip_header.protocol == IPProtocol.TCP.value:
|
271 |
+
packet.transport_header, transport_header_len = cls.parse_tcp_header(ip_payload)
|
272 |
+
packet.payload = ip_payload[transport_header_len:]
|
273 |
+
elif packet.ip_header.protocol == IPProtocol.UDP.value:
|
274 |
+
packet.transport_header, transport_header_len = cls.parse_udp_header(ip_payload)
|
275 |
+
packet.payload = ip_payload[transport_header_len:]
|
276 |
+
else:
|
277 |
+
# Unsupported protocol, treat as raw payload
|
278 |
+
packet.payload = ip_payload
|
279 |
+
|
280 |
+
return packet
|
281 |
+
|
282 |
+
@classmethod
|
283 |
+
def build_ipv4_header(cls, header: IPv4Header) -> bytes:
|
284 |
+
"""Build IPv4 header as bytes"""
|
285 |
+
# Calculate header length including options
|
286 |
+
header.ihl = (20 + len(header.options) + 3) // 4 # Round up to 32-bit boundary
|
287 |
+
|
288 |
+
# Build header without checksum
|
289 |
+
version_ihl = (header.version << 4) | header.ihl
|
290 |
+
flags_fragment = (header.flags << 13) | header.fragment_offset
|
291 |
+
|
292 |
+
header_data = struct.pack(
|
293 |
+
'!BBHHHBBH4s4s',
|
294 |
+
version_ihl, header.tos, header.total_length,
|
295 |
+
header.identification, flags_fragment,
|
296 |
+
header.ttl, header.protocol, 0, # Checksum = 0 for calculation
|
297 |
+
socket.inet_aton(header.source_ip),
|
298 |
+
socket.inet_aton(header.dest_ip)
|
299 |
+
)
|
300 |
+
|
301 |
+
# Add options and padding
|
302 |
+
if header.options:
|
303 |
+
header_data += header.options
|
304 |
+
# Pad to 32-bit boundary
|
305 |
+
padding_needed = (header.ihl * 4) - len(header_data)
|
306 |
+
if padding_needed > 0:
|
307 |
+
header_data += b'\x00' * padding_needed
|
308 |
+
|
309 |
+
# Calculate and insert checksum
|
310 |
+
checksum = cls.calculate_checksum(header_data)
|
311 |
+
header_data = header_data[:10] + struct.pack('!H', checksum) + header_data[12:]
|
312 |
+
|
313 |
+
return header_data
|
314 |
+
|
315 |
+
@classmethod
|
316 |
+
def build_tcp_header(cls, header: TCPHeader, source_ip: str, dest_ip: str, payload: bytes) -> bytes:
|
317 |
+
"""Build TCP header as bytes with checksum"""
|
318 |
+
# Calculate header length including options
|
319 |
+
header.data_offset = (20 + len(header.options) + 3) // 4 # Round up to 32-bit boundary
|
320 |
+
|
321 |
+
# Build header without checksum
|
322 |
+
offset_reserved_flags = (header.data_offset << 12) | (header.reserved << 9) | header.flags
|
323 |
+
|
324 |
+
header_data = struct.pack(
|
325 |
+
'!HHIIHHHH',  # 8 fields for the 20-byte fixed TCP header
|
326 |
+
header.source_port, header.dest_port,
|
327 |
+
header.seq_num, header.ack_num,
|
328 |
+
offset_reserved_flags, header.window_size,
|
329 |
+
0, header.urgent_pointer # Checksum = 0 for calculation
|
330 |
+
)
|
331 |
+
|
332 |
+
# Add options and padding
|
333 |
+
if header.options:
|
334 |
+
header_data += header.options
|
335 |
+
# Pad to 32-bit boundary
|
336 |
+
padding_needed = (header.data_offset * 4) - len(header_data)
|
337 |
+
if padding_needed > 0:
|
338 |
+
header_data += b'\x00' * padding_needed
|
339 |
+
|
340 |
+
# Calculate TCP checksum with pseudo-header
|
341 |
+
pseudo_header = struct.pack(
|
342 |
+
'!4s4sBBH',
|
343 |
+
socket.inet_aton(source_ip),
|
344 |
+
socket.inet_aton(dest_ip),
|
345 |
+
0, IPProtocol.TCP.value,
|
346 |
+
len(header_data) + len(payload)
|
347 |
+
)
|
348 |
+
|
349 |
+
checksum_data = pseudo_header + header_data + payload
|
350 |
+
checksum = cls.calculate_checksum(checksum_data)
|
351 |
+
|
352 |
+
# Insert checksum
|
353 |
+
header_data = header_data[:16] + struct.pack('!H', checksum) + header_data[18:]
|
354 |
+
|
355 |
+
return header_data
|
356 |
+
|
357 |
+
@classmethod
|
358 |
+
def build_udp_header(cls, header: UDPHeader, source_ip: str, dest_ip: str, payload: bytes) -> bytes:
|
359 |
+
"""Build UDP header as bytes with checksum"""
|
360 |
+
header.length = 8 + len(payload)
|
361 |
+
|
362 |
+
# Build header without checksum
|
363 |
+
header_data = struct.pack(
|
364 |
+
'!HHHH',
|
365 |
+
header.source_port, header.dest_port,
|
366 |
+
header.length, 0 # Checksum = 0 for calculation
|
367 |
+
)
|
368 |
+
|
369 |
+
# Calculate UDP checksum with pseudo-header (optional for IPv4)
|
370 |
+
if header.checksum != 0: # If checksum is required
|
371 |
+
pseudo_header = struct.pack(
|
372 |
+
'!4s4sBBH',
|
373 |
+
socket.inet_aton(source_ip),
|
374 |
+
socket.inet_aton(dest_ip),
|
375 |
+
0, IPProtocol.UDP.value,
|
376 |
+
header.length
|
377 |
+
)
|
378 |
+
|
379 |
+
checksum_data = pseudo_header + header_data + payload
|
380 |
+
checksum = cls.calculate_checksum(checksum_data)
|
381 |
+
|
382 |
+
# Insert checksum
|
383 |
+
header_data = header_data[:6] + struct.pack('!H', checksum) + header_data[8:]
|
384 |
+
|
385 |
+
return header_data
|
386 |
+
|
387 |
+
@classmethod
|
388 |
+
def build_packet(cls, ip_header: IPv4Header, transport_header: Optional[object] = None, payload: bytes = b'') -> bytes:
|
389 |
+
"""Build complete packet"""
|
390 |
+
transport_data = b''
|
391 |
+
|
392 |
+
# Build transport header
|
393 |
+
if transport_header:
|
394 |
+
if isinstance(transport_header, TCPHeader):
|
395 |
+
transport_data = cls.build_tcp_header(
|
396 |
+
transport_header, ip_header.source_ip, ip_header.dest_ip, payload
|
397 |
+
)
|
398 |
+
elif isinstance(transport_header, UDPHeader):
|
399 |
+
transport_data = cls.build_udp_header(
|
400 |
+
transport_header, ip_header.source_ip, ip_header.dest_ip, payload
|
401 |
+
)
|
402 |
+
|
403 |
+
# Update IP header total length
|
404 |
+
ip_header.total_length = ip_header.header_length + len(transport_data) + len(payload)
|
405 |
+
|
406 |
+
# Build IP header
|
407 |
+
ip_data = cls.build_ipv4_header(ip_header)
|
408 |
+
|
409 |
+
# Combine all parts
|
410 |
+
return ip_data + transport_data + payload
|
411 |
+
|
412 |
+
|
413 |
+
class PacketFragmenter:
|
414 |
+
"""Handle packet fragmentation and reassembly"""
|
415 |
+
|
416 |
+
def __init__(self, mtu: int = 1500):
|
417 |
+
self.mtu = mtu
|
418 |
+
self.fragments: Dict[Tuple[str, str, int], List[Tuple[int, bytes]]] = {} # (src, dst, id) -> [(offset, data)]
|
419 |
+
|
420 |
+
def fragment_packet(self, packet: bytes, mtu: int = None) -> List[bytes]:
|
421 |
+
"""Fragment a packet if it exceeds MTU"""
|
422 |
+
if mtu is None:
|
423 |
+
mtu = self.mtu
|
424 |
+
|
425 |
+
if len(packet) <= mtu:
|
426 |
+
return [packet]
|
427 |
+
|
428 |
+
# Parse original packet
|
429 |
+
parsed = IPParser.parse_packet(packet)
|
430 |
+
ip_header = parsed.ip_header
|
431 |
+
|
432 |
+
# Don't fragment if DF flag is set
|
433 |
+
if ip_header.dont_fragment:
|
434 |
+
raise ValueError("Packet too large and Don't Fragment flag is set")
|
435 |
+
|
436 |
+
fragments = []
|
437 |
+
payload_mtu = mtu - ip_header.header_length
|
438 |
+
payload_mtu = (payload_mtu // 8) * 8 # Must be multiple of 8 bytes
|
439 |
+
|
440 |
+
# Get the payload to fragment (everything after IP header)
|
441 |
+
payload_start = ip_header.header_length
|
442 |
+
payload = packet[payload_start:]
|
443 |
+
|
444 |
+
offset = 0
|
445 |
+
while offset < len(payload):
|
446 |
+
# Create fragment
|
447 |
+
fragment_payload = payload[offset:offset + payload_mtu]
|
448 |
+
|
449 |
+
# Create new IP header for fragment
|
450 |
+
frag_header = IPv4Header(
|
451 |
+
version=ip_header.version,
|
452 |
+
ihl=ip_header.ihl,
|
453 |
+
tos=ip_header.tos,
|
454 |
+
identification=ip_header.identification,
|
455 |
+
ttl=ip_header.ttl,
|
456 |
+
protocol=ip_header.protocol,
|
457 |
+
source_ip=ip_header.source_ip,
|
458 |
+
dest_ip=ip_header.dest_ip,
|
459 |
+
options=ip_header.options
|
460 |
+
)
|
461 |
+
|
462 |
+
# Set fragment offset and flags
|
463 |
+
frag_header.fragment_offset = (ip_header.fragment_offset * 8 + offset) // 8
|
464 |
+
frag_header.flags = ip_header.flags
|
465 |
+
|
466 |
+
# Set More Fragments flag if not last fragment
|
467 |
+
if offset + len(fragment_payload) < len(payload):
|
468 |
+
frag_header.flags |= 0x1 # More Fragments
|
469 |
+
else:
|
470 |
+
frag_header.flags &= ~0x1 # Clear More Fragments
|
471 |
+
|
472 |
+
# Build fragment
|
473 |
+
fragment = IPParser.build_packet(frag_header, payload=fragment_payload)
|
474 |
+
fragments.append(fragment)
|
475 |
+
|
476 |
+
offset += len(fragment_payload)
|
477 |
+
|
478 |
+
return fragments
|
479 |
+
|
480 |
+
def reassemble_packet(self, packet: bytes) -> Optional[bytes]:
|
481 |
+
"""Reassemble fragmented packet"""
|
482 |
+
parsed = IPParser.parse_packet(packet)
|
483 |
+
ip_header = parsed.ip_header
|
484 |
+
|
485 |
+
# If not a fragment, return as-is
|
486 |
+
if not ip_header.is_fragment:
|
487 |
+
return packet
|
488 |
+
|
489 |
+
# Create fragment key
|
490 |
+
key = (ip_header.source_ip, ip_header.dest_ip, ip_header.identification)
|
491 |
+
|
492 |
+
# Store fragment
|
493 |
+
if key not in self.fragments:
|
494 |
+
self.fragments[key] = []
|
495 |
+
|
496 |
+
payload_start = ip_header.header_length
|
497 |
+
fragment_data = packet[payload_start:]
|
498 |
+
self.fragments[key].append((ip_header.fragment_offset * 8, fragment_data, ip_header.more_fragments))
|
499 |
+
|
500 |
+
# Check if we have all fragments
|
501 |
+
fragments = sorted(self.fragments[key])
|
502 |
+
|
503 |
+
# Verify we have contiguous fragments starting from 0
|
504 |
+
expected_offset = 0
|
505 |
+
complete_payload = b''
|
506 |
+
|
507 |
+
for offset, data, _more_frags in fragments:
|
508 |
+
if offset != expected_offset:
|
509 |
+
return None # Missing fragment
|
510 |
+
|
511 |
+
complete_payload += data
|
512 |
+
expected_offset += len(data)
|
513 |
+
|
514 |
+
# Check that the final fragment (More Fragments flag clear) has arrived
|
515 |
+
have_last_fragment = False
|
516 |
+
for _offset, _data, more_frags in fragments:
|
517 |
+
if not more_frags:
|
518 |
+
have_last_fragment = True
|
519 |
+
break
|
520 |
+
|
521 |
+
|
522 |
+
if not have_last_fragment:
|
523 |
+
return None # Don't have the last fragment yet
|
524 |
+
|
525 |
+
# Reassemble complete packet
|
526 |
+
complete_header = IPv4Header(
|
527 |
+
version=ip_header.version,
|
528 |
+
ihl=ip_header.ihl,
|
529 |
+
tos=ip_header.tos,
|
530 |
+
identification=ip_header.identification,
|
531 |
+
flags=ip_header.flags & ~0x1, # Clear More Fragments
|
532 |
+
fragment_offset=0,
|
533 |
+
ttl=ip_header.ttl,
|
534 |
+
protocol=ip_header.protocol,
|
535 |
+
source_ip=ip_header.source_ip,
|
536 |
+
dest_ip=ip_header.dest_ip,
|
537 |
+
options=ip_header.options
|
538 |
+
)
|
539 |
+
|
540 |
+
complete_packet = IPParser.build_packet(complete_header, payload=complete_payload)
|
541 |
+
|
542 |
+
# Clean up fragments
|
543 |
+
del self.fragments[key]
|
544 |
+
|
545 |
+
return complete_packet
|
546 |
+
|
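The parser and assembler above are symmetric, so a round trip is the simplest sanity check. Below is a minimal sketch (not part of the upload) that builds a UDP/IPv4 packet and parses it back using only the classes defined in core/ip_parser.py; the addresses, ports, and payload are arbitrary example values.

from core.ip_parser import IPParser, IPv4Header, UDPHeader, IPProtocol

ip = IPv4Header(source_ip='10.66.0.2', dest_ip='8.8.8.8',
                protocol=IPProtocol.UDP.value, identification=0x1234)
udp = UDPHeader(source_port=40000, dest_port=53)
payload = b'example payload'

raw = IPParser.build_packet(ip, transport_header=udp, payload=payload)

parsed = IPParser.parse_packet(raw)
assert parsed.ip_header.dest_ip == '8.8.8.8'
assert parsed.transport_header.dest_port == 53
assert parsed.payload == payload

# A header that already carries a valid checksum folds to zero under the Internet checksum.
assert IPParser.calculate_checksum(raw[:parsed.ip_header.header_length]) == 0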
core/logger.py
ADDED
@@ -0,0 +1,555 @@
1 |
+
"""
|
2 |
+
Logger Module
|
3 |
+
|
4 |
+
Centralized logging system for the virtual ISP stack:
|
5 |
+
- Structured logging with multiple levels
|
6 |
+
- Log aggregation and filtering
|
7 |
+
- Real-time log streaming
|
8 |
+
- Log persistence and rotation
|
9 |
+
"""
|
10 |
+
|
11 |
+
import logging
|
12 |
+
import logging.handlers
|
13 |
+
import time
|
14 |
+
import threading
|
15 |
+
import json
|
16 |
+
import os
|
17 |
+
from typing import Dict, List, Optional, Any, Callable
|
18 |
+
from dataclasses import dataclass, asdict
|
19 |
+
from enum import Enum
|
20 |
+
from collections import deque
|
21 |
+
import queue
|
22 |
+
|
23 |
+
|
24 |
+
class LogLevel(Enum):
|
25 |
+
DEBUG = "DEBUG"
|
26 |
+
INFO = "INFO"
|
27 |
+
WARNING = "WARNING"
|
28 |
+
ERROR = "ERROR"
|
29 |
+
CRITICAL = "CRITICAL"
|
30 |
+
|
31 |
+
|
32 |
+
class LogCategory(Enum):
|
33 |
+
SYSTEM = "SYSTEM"
|
34 |
+
DHCP = "DHCP"
|
35 |
+
NAT = "NAT"
|
36 |
+
FIREWALL = "FIREWALL"
|
37 |
+
TCP = "TCP"
|
38 |
+
ROUTER = "ROUTER"
|
39 |
+
BRIDGE = "BRIDGE"
|
40 |
+
SOCKET = "SOCKET"
|
41 |
+
SESSION = "SESSION"
|
42 |
+
SECURITY = "SECURITY"
|
43 |
+
PERFORMANCE = "PERFORMANCE"
|
44 |
+
|
45 |
+
|
46 |
+
@dataclass
|
47 |
+
class LogEntry:
|
48 |
+
"""Structured log entry"""
|
49 |
+
timestamp: float
|
50 |
+
level: str
|
51 |
+
category: str
|
52 |
+
module: str
|
53 |
+
message: str
|
54 |
+
session_id: Optional[str] = None
|
55 |
+
client_id: Optional[str] = None
|
56 |
+
source_ip: Optional[str] = None
|
57 |
+
dest_ip: Optional[str] = None
|
58 |
+
protocol: Optional[str] = None
|
59 |
+
metadata: Dict[str, Any] = None
|
60 |
+
|
61 |
+
def __post_init__(self):
|
62 |
+
if self.timestamp == 0:
|
63 |
+
self.timestamp = time.time()
|
64 |
+
if self.metadata is None:
|
65 |
+
self.metadata = {}
|
66 |
+
|
67 |
+
def to_dict(self) -> Dict:
|
68 |
+
"""Convert to dictionary"""
|
69 |
+
return asdict(self)
|
70 |
+
|
71 |
+
def to_json(self) -> str:
|
72 |
+
"""Convert to JSON string"""
|
73 |
+
return json.dumps(self.to_dict(), default=str)
|
74 |
+
|
75 |
+
|
76 |
+
class LogFilter:
|
77 |
+
"""Log filtering class"""
|
78 |
+
|
79 |
+
def __init__(self):
|
80 |
+
self.level_filter: Optional[LogLevel] = None
|
81 |
+
self.category_filter: Optional[LogCategory] = None
|
82 |
+
self.module_filter: Optional[str] = None
|
83 |
+
self.session_filter: Optional[str] = None
|
84 |
+
self.client_filter: Optional[str] = None
|
85 |
+
self.ip_filter: Optional[str] = None
|
86 |
+
self.text_filter: Optional[str] = None
|
87 |
+
self.time_range: Optional[tuple] = None
|
88 |
+
|
89 |
+
def matches(self, entry: LogEntry) -> bool:
|
90 |
+
"""Check if log entry matches filter criteria"""
|
91 |
+
# Level filter
|
92 |
+
if self.level_filter:
|
93 |
+
entry_level_value = getattr(logging, entry.level)
|
94 |
+
filter_level_value = getattr(logging, self.level_filter.value)
|
95 |
+
if entry_level_value < filter_level_value:
|
96 |
+
return False
|
97 |
+
|
98 |
+
# Category filter
|
99 |
+
if self.category_filter and entry.category != self.category_filter.value:
|
100 |
+
return False
|
101 |
+
|
102 |
+
# Module filter
|
103 |
+
if self.module_filter and self.module_filter.lower() not in entry.module.lower():
|
104 |
+
return False
|
105 |
+
|
106 |
+
# Session filter
|
107 |
+
if self.session_filter and entry.session_id != self.session_filter:
|
108 |
+
return False
|
109 |
+
|
110 |
+
# Client filter
|
111 |
+
if self.client_filter and entry.client_id != self.client_filter:
|
112 |
+
return False
|
113 |
+
|
114 |
+
# IP filter
|
115 |
+
if self.ip_filter:
|
116 |
+
if (entry.source_ip != self.ip_filter and
|
117 |
+
entry.dest_ip != self.ip_filter):
|
118 |
+
return False
|
119 |
+
|
120 |
+
# Text filter
|
121 |
+
if self.text_filter and self.text_filter.lower() not in entry.message.lower():
|
122 |
+
return False
|
123 |
+
|
124 |
+
# Time range filter
|
125 |
+
if self.time_range:
|
126 |
+
start_time, end_time = self.time_range
|
127 |
+
if not (start_time <= entry.timestamp <= end_time):
|
128 |
+
return False
|
129 |
+
|
130 |
+
return True
|
131 |
+
|
132 |
+
|
133 |
+
class LogSubscriber:
|
134 |
+
"""Log subscriber for real-time streaming"""
|
135 |
+
|
136 |
+
def __init__(self, subscriber_id: str, callback: Callable[[LogEntry], None],
|
137 |
+
log_filter: Optional[LogFilter] = None):
|
138 |
+
self.subscriber_id = subscriber_id
|
139 |
+
self.callback = callback
|
140 |
+
self.filter = log_filter or LogFilter()
|
141 |
+
self.created_time = time.time()
|
142 |
+
self.message_count = 0
|
143 |
+
self.last_message_time = None
|
144 |
+
self.is_active = True
|
145 |
+
|
146 |
+
def send_log(self, entry: LogEntry) -> bool:
|
147 |
+
"""Send log entry to subscriber if it matches filter"""
|
148 |
+
if not self.is_active:
|
149 |
+
return False
|
150 |
+
|
151 |
+
if self.filter.matches(entry):
|
152 |
+
try:
|
153 |
+
self.callback(entry)
|
154 |
+
self.message_count += 1
|
155 |
+
self.last_message_time = time.time()
|
156 |
+
return True
|
157 |
+
except Exception as e:
|
158 |
+
print(f"Error sending log to subscriber {self.subscriber_id}: {e}")
|
159 |
+
self.is_active = False
|
160 |
+
return False
|
161 |
+
|
162 |
+
return False
|
163 |
+
|
164 |
+
|
165 |
+
class VirtualISPLogger:
|
166 |
+
"""Centralized logger for Virtual ISP stack"""
|
167 |
+
|
168 |
+
def __init__(self, config: Dict):
|
169 |
+
self.config = config
|
170 |
+
self.log_entries: deque = deque(maxlen=config.get('max_memory_logs', 10000))
|
171 |
+
self.subscribers: Dict[str, LogSubscriber] = {}
|
172 |
+
self.lock = threading.Lock()
|
173 |
+
|
174 |
+
# Configuration
|
175 |
+
self.log_level = LogLevel(config.get('log_level', 'INFO'))
|
176 |
+
self.log_to_file = config.get('log_to_file', True)
|
177 |
+
self.log_file_path = config.get('log_file_path', '/tmp/virtual_isp.log')
|
178 |
+
self.log_file_max_size = config.get('log_file_max_size', 10 * 1024 * 1024) # 10MB
|
179 |
+
self.log_file_backup_count = config.get('log_file_backup_count', 5)
|
180 |
+
self.log_to_console = config.get('log_to_console', True)
|
181 |
+
self.structured_logging = config.get('structured_logging', True)
|
182 |
+
|
183 |
+
# Statistics
|
184 |
+
self.stats = {
|
185 |
+
'total_logs': 0,
|
186 |
+
'logs_by_level': {level.value: 0 for level in LogLevel},
|
187 |
+
'logs_by_category': {cat.value: 0 for cat in LogCategory},
|
188 |
+
'active_subscribers': 0,
|
189 |
+
'file_logs_written': 0,
|
190 |
+
'console_logs_written': 0,
|
191 |
+
'dropped_logs': 0
|
192 |
+
}
|
193 |
+
|
194 |
+
# Setup logging
|
195 |
+
self._setup_logging()
|
196 |
+
|
197 |
+
# Background processing
|
198 |
+
self.running = False
|
199 |
+
self.log_queue = queue.Queue()
|
200 |
+
self.processing_thread = None
|
201 |
+
|
202 |
+
def _setup_logging(self):
|
203 |
+
"""Setup Python logging infrastructure"""
|
204 |
+
# Create logger
|
205 |
+
self.logger = logging.getLogger('virtual_isp')
|
206 |
+
self.logger.setLevel(getattr(logging, self.log_level.value))
|
207 |
+
|
208 |
+
# Remove existing handlers
|
209 |
+
for handler in self.logger.handlers[:]:
|
210 |
+
self.logger.removeHandler(handler)
|
211 |
+
|
212 |
+
# Console handler
|
213 |
+
if self.log_to_console:
|
214 |
+
console_handler = logging.StreamHandler()
|
215 |
+
if self.structured_logging:
|
216 |
+
console_formatter = logging.Formatter(
|
217 |
+
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
218 |
+
)
|
219 |
+
else:
|
220 |
+
console_formatter = logging.Formatter('%(message)s')
|
221 |
+
console_handler.setFormatter(console_formatter)
|
222 |
+
self.logger.addHandler(console_handler)
|
223 |
+
|
224 |
+
# File handler with rotation
|
225 |
+
if self.log_to_file:
|
226 |
+
# Ensure log directory exists
|
227 |
+
log_dir = os.path.dirname(self.log_file_path)
|
228 |
+
if log_dir and not os.path.exists(log_dir):
|
229 |
+
os.makedirs(log_dir, exist_ok=True)
|
230 |
+
|
231 |
+
file_handler = logging.handlers.RotatingFileHandler(
|
232 |
+
self.log_file_path,
|
233 |
+
maxBytes=self.log_file_max_size,
|
234 |
+
backupCount=self.log_file_backup_count
|
235 |
+
)
|
236 |
+
|
237 |
+
if self.structured_logging:
|
238 |
+
file_formatter = logging.Formatter(
|
239 |
+
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
240 |
+
)
|
241 |
+
else:
|
242 |
+
file_formatter = logging.Formatter('%(message)s')
|
243 |
+
|
244 |
+
file_handler.setFormatter(file_formatter)
|
245 |
+
self.logger.addHandler(file_handler)
|
246 |
+
|
247 |
+
def _process_log_queue(self):
|
248 |
+
"""Background thread to process log queue"""
|
249 |
+
while self.running:
|
250 |
+
try:
|
251 |
+
# Get log entry from queue (with timeout)
|
252 |
+
try:
|
253 |
+
entry = self.log_queue.get(timeout=1.0)
|
254 |
+
except queue.Empty:
|
255 |
+
continue
|
256 |
+
|
257 |
+
# Store in memory
|
258 |
+
with self.lock:
|
259 |
+
self.log_entries.append(entry)
|
260 |
+
|
261 |
+
# Send to subscribers
|
262 |
+
inactive_subscribers = []
|
263 |
+
with self.lock:
|
264 |
+
for subscriber_id, subscriber in self.subscribers.items():
|
265 |
+
if not subscriber.send_log(entry):
|
266 |
+
inactive_subscribers.append(subscriber_id)
|
267 |
+
|
268 |
+
# Remove inactive subscribers
|
269 |
+
for subscriber_id in inactive_subscribers:
|
270 |
+
self.remove_subscriber(subscriber_id)
|
271 |
+
|
272 |
+
# Update statistics
|
273 |
+
self.stats['total_logs'] += 1
|
274 |
+
self.stats['logs_by_level'][entry.level] += 1
|
275 |
+
self.stats['logs_by_category'][entry.category] += 1
|
276 |
+
|
277 |
+
# Mark task as done
|
278 |
+
self.log_queue.task_done()
|
279 |
+
|
280 |
+
except Exception as e:
|
281 |
+
print(f"Error processing log queue: {e}")
|
282 |
+
time.sleep(1)
|
283 |
+
|
284 |
+
def log(self, level: LogLevel, category: LogCategory, module: str, message: str,
|
285 |
+
session_id: Optional[str] = None, client_id: Optional[str] = None,
|
286 |
+
source_ip: Optional[str] = None, dest_ip: Optional[str] = None,
|
287 |
+
protocol: Optional[str] = None, **metadata):
|
288 |
+
"""Log a message"""
|
289 |
+
# Check if we should log this level
|
290 |
+
level_value = getattr(logging, level.value)
|
291 |
+
min_level_value = getattr(logging, self.log_level.value)
|
292 |
+
if level_value < min_level_value:
|
293 |
+
return
|
294 |
+
|
295 |
+
# Create log entry
|
296 |
+
entry = LogEntry(
|
297 |
+
timestamp=time.time(),
|
298 |
+
level=level.value,
|
299 |
+
category=category.value,
|
300 |
+
module=module,
|
301 |
+
message=message,
|
302 |
+
session_id=session_id,
|
303 |
+
client_id=client_id,
|
304 |
+
source_ip=source_ip,
|
305 |
+
dest_ip=dest_ip,
|
306 |
+
protocol=protocol,
|
307 |
+
metadata=metadata
|
308 |
+
)
|
309 |
+
|
310 |
+
# Add to queue for background processing
|
311 |
+
try:
|
312 |
+
self.log_queue.put_nowait(entry)
|
313 |
+
except queue.Full:
|
314 |
+
self.stats['dropped_logs'] += 1
|
315 |
+
|
316 |
+
# Also log through Python logging system
|
317 |
+
if self.structured_logging:
|
318 |
+
log_data = entry.to_dict()
|
319 |
+
log_message = f"{message} | {json.dumps(log_data, default=str)}"
|
320 |
+
else:
|
321 |
+
log_message = message
|
322 |
+
|
323 |
+
# Log to Python logger
|
324 |
+
python_logger_level = getattr(logging, level.value)
|
325 |
+
self.logger.log(python_logger_level, log_message)
|
326 |
+
|
327 |
+
# Update console/file stats
|
328 |
+
if self.log_to_console:
|
329 |
+
self.stats['console_logs_written'] += 1
|
330 |
+
if self.log_to_file:
|
331 |
+
self.stats['file_logs_written'] += 1
|
332 |
+
|
333 |
+
def debug(self, category: LogCategory, module: str, message: str, **kwargs):
|
334 |
+
"""Log debug message"""
|
335 |
+
self.log(LogLevel.DEBUG, category, module, message, **kwargs)
|
336 |
+
|
337 |
+
def info(self, category: LogCategory, module: str, message: str, **kwargs):
|
338 |
+
"""Log info message"""
|
339 |
+
self.log(LogLevel.INFO, category, module, message, **kwargs)
|
340 |
+
|
341 |
+
def warning(self, category: LogCategory, module: str, message: str, **kwargs):
|
342 |
+
"""Log warning message"""
|
343 |
+
self.log(LogLevel.WARNING, category, module, message, **kwargs)
|
344 |
+
|
345 |
+
def error(self, category: LogCategory, module: str, message: str, **kwargs):
|
346 |
+
"""Log error message"""
|
347 |
+
self.log(LogLevel.ERROR, category, module, message, **kwargs)
|
348 |
+
|
349 |
+
def critical(self, category: LogCategory, module: str, message: str, **kwargs):
|
350 |
+
"""Log critical message"""
|
351 |
+
self.log(LogLevel.CRITICAL, category, module, message, **kwargs)
|
352 |
+
|
353 |
+
def add_subscriber(self, subscriber_id: str, callback: Callable[[LogEntry], None],
|
354 |
+
log_filter: Optional[LogFilter] = None) -> bool:
|
355 |
+
"""Add log subscriber for real-time streaming"""
|
356 |
+
with self.lock:
|
357 |
+
if subscriber_id in self.subscribers:
|
358 |
+
return False
|
359 |
+
|
360 |
+
subscriber = LogSubscriber(subscriber_id, callback, log_filter)
|
361 |
+
self.subscribers[subscriber_id] = subscriber
|
362 |
+
self.stats['active_subscribers'] = len(self.subscribers)
|
363 |
+
|
364 |
+
return True
|
365 |
+
|
366 |
+
def remove_subscriber(self, subscriber_id: str) -> bool:
|
367 |
+
"""Remove log subscriber"""
|
368 |
+
with self.lock:
|
369 |
+
if subscriber_id in self.subscribers:
|
370 |
+
del self.subscribers[subscriber_id]
|
371 |
+
self.stats['active_subscribers'] = len(self.subscribers)
|
372 |
+
return True
|
373 |
+
return False
|
374 |
+
|
375 |
+
def get_logs(self, limit: int = 100, offset: int = 0,
|
376 |
+
log_filter: Optional[LogFilter] = None) -> List[Dict]:
|
377 |
+
"""Get logs with filtering and pagination"""
|
378 |
+
with self.lock:
|
379 |
+
# Convert deque to list for easier manipulation
|
380 |
+
all_logs = list(self.log_entries)
|
381 |
+
|
382 |
+
# Apply filter
|
383 |
+
if log_filter:
|
384 |
+
filtered_logs = [entry for entry in all_logs if log_filter.matches(entry)]
|
385 |
+
else:
|
386 |
+
filtered_logs = all_logs
|
387 |
+
|
388 |
+
# Sort by timestamp (newest first)
|
389 |
+
filtered_logs.sort(key=lambda x: x.timestamp, reverse=True)
|
390 |
+
|
391 |
+
# Apply pagination
|
392 |
+
paginated_logs = filtered_logs[offset:offset + limit]
|
393 |
+
|
394 |
+
return [entry.to_dict() for entry in paginated_logs]
|
395 |
+
|
396 |
+
def search_logs(self, query: str, limit: int = 100) -> List[Dict]:
|
397 |
+
"""Search logs by text query"""
|
398 |
+
log_filter = LogFilter()
|
399 |
+
log_filter.text_filter = query
|
400 |
+
|
401 |
+
return self.get_logs(limit=limit, log_filter=log_filter)
|
402 |
+
|
403 |
+
def get_logs_by_session(self, session_id: str, limit: int = 100) -> List[Dict]:
|
404 |
+
"""Get logs for specific session"""
|
405 |
+
log_filter = LogFilter()
|
406 |
+
log_filter.session_filter = session_id
|
407 |
+
|
408 |
+
return self.get_logs(limit=limit, log_filter=log_filter)
|
409 |
+
|
410 |
+
def get_logs_by_client(self, client_id: str, limit: int = 100) -> List[Dict]:
|
411 |
+
"""Get logs for specific client"""
|
412 |
+
log_filter = LogFilter()
|
413 |
+
log_filter.client_filter = client_id
|
414 |
+
|
415 |
+
return self.get_logs(limit=limit, log_filter=log_filter)
|
416 |
+
|
417 |
+
def get_logs_by_ip(self, ip_address: str, limit: int = 100) -> List[Dict]:
|
418 |
+
"""Get logs for specific IP address"""
|
419 |
+
log_filter = LogFilter()
|
420 |
+
log_filter.ip_filter = ip_address
|
421 |
+
|
422 |
+
return self.get_logs(limit=limit, log_filter=log_filter)
|
423 |
+
|
424 |
+
def get_recent_errors(self, limit: int = 50) -> List[Dict]:
|
425 |
+
"""Get recent error and critical logs"""
|
426 |
+
log_filter = LogFilter()
|
427 |
+
log_filter.level_filter = LogLevel.ERROR
|
428 |
+
|
429 |
+
return self.get_logs(limit=limit, log_filter=log_filter)
|
430 |
+
|
431 |
+
def clear_logs(self):
|
432 |
+
"""Clear all logs from memory"""
|
433 |
+
with self.lock:
|
434 |
+
self.log_entries.clear()
|
435 |
+
|
436 |
+
def get_stats(self) -> Dict:
|
437 |
+
"""Get logging statistics"""
|
438 |
+
with self.lock:
|
439 |
+
stats = self.stats.copy()
|
440 |
+
stats['memory_logs_count'] = len(self.log_entries)
|
441 |
+
stats['active_subscribers'] = len(self.subscribers)
|
442 |
+
stats['queue_size'] = self.log_queue.qsize()
|
443 |
+
|
444 |
+
return stats
|
445 |
+
|
446 |
+
def reset_stats(self):
|
447 |
+
"""Reset logging statistics"""
|
448 |
+
self.stats = {
|
449 |
+
'total_logs': 0,
|
450 |
+
'logs_by_level': {level.value: 0 for level in LogLevel},
|
451 |
+
'logs_by_category': {cat.value: 0 for cat in LogCategory},
|
452 |
+
'active_subscribers': len(self.subscribers),
|
453 |
+
'file_logs_written': 0,
|
454 |
+
'console_logs_written': 0,
|
455 |
+
'dropped_logs': 0
|
456 |
+
}
|
457 |
+
|
458 |
+
def export_logs(self, format: str = 'json', log_filter: Optional[LogFilter] = None) -> str:
|
459 |
+
"""Export logs in specified format"""
|
460 |
+
logs = self.get_logs(limit=10000, log_filter=log_filter)
|
461 |
+
|
462 |
+
if format == 'json':
|
463 |
+
return json.dumps(logs, indent=2, default=str)
|
464 |
+
elif format == 'csv':
|
465 |
+
import csv
|
466 |
+
import io
|
467 |
+
|
468 |
+
output = io.StringIO()
|
469 |
+
if logs:
|
470 |
+
writer = csv.DictWriter(output, fieldnames=logs[0].keys())
|
471 |
+
writer.writeheader()
|
472 |
+
writer.writerows(logs)
|
473 |
+
|
474 |
+
return output.getvalue()
|
475 |
+
else:
|
476 |
+
raise ValueError(f"Unsupported export format: {format}")
|
477 |
+
|
478 |
+
def set_log_level(self, level: LogLevel):
|
479 |
+
"""Set logging level"""
|
480 |
+
self.log_level = level
|
481 |
+
self.logger.setLevel(getattr(logging, level.value))
|
482 |
+
|
483 |
+
def start(self):
|
484 |
+
"""Start logger"""
|
485 |
+
self.running = True
|
486 |
+
self.processing_thread = threading.Thread(target=self._process_log_queue, daemon=True)
|
487 |
+
self.processing_thread.start()
|
488 |
+
|
489 |
+
self.info(LogCategory.SYSTEM, 'logger', 'Virtual ISP Logger started')
|
490 |
+
|
491 |
+
def stop(self):
|
492 |
+
"""Stop logger"""
|
493 |
+
self.info(LogCategory.SYSTEM, 'logger', 'Virtual ISP Logger stopping')
|
494 |
+
|
495 |
+
self.running = False
|
496 |
+
|
497 |
+
# Wait for queue to be processed
|
498 |
+
self.log_queue.join()
|
499 |
+
|
500 |
+
# Wait for processing thread
|
501 |
+
if self.processing_thread:
|
502 |
+
self.processing_thread.join()
|
503 |
+
|
504 |
+
# Remove all subscribers
|
505 |
+
with self.lock:
|
506 |
+
self.subscribers.clear()
|
507 |
+
|
508 |
+
print("Virtual ISP Logger stopped")
|
509 |
+
|
510 |
+
|
511 |
+
# Global logger instance
|
512 |
+
_global_logger: Optional[VirtualISPLogger] = None
|
513 |
+
|
514 |
+
|
515 |
+
def get_logger() -> Optional[VirtualISPLogger]:
|
516 |
+
"""Get global logger instance"""
|
517 |
+
return _global_logger
|
518 |
+
|
519 |
+
|
520 |
+
def init_logger(config: Dict) -> VirtualISPLogger:
|
521 |
+
"""Initialize global logger"""
|
522 |
+
global _global_logger
|
523 |
+
_global_logger = VirtualISPLogger(config)
|
524 |
+
return _global_logger
|
525 |
+
|
526 |
+
|
527 |
+
def log_debug(category: LogCategory, module: str, message: str, **kwargs):
|
528 |
+
"""Global debug logging function"""
|
529 |
+
if _global_logger:
|
530 |
+
_global_logger.debug(category, module, message, **kwargs)
|
531 |
+
|
532 |
+
|
533 |
+
def log_info(category: LogCategory, module: str, message: str, **kwargs):
|
534 |
+
"""Global info logging function"""
|
535 |
+
if _global_logger:
|
536 |
+
_global_logger.info(category, module, message, **kwargs)
|
537 |
+
|
538 |
+
|
539 |
+
def log_warning(category: LogCategory, module: str, message: str, **kwargs):
|
540 |
+
"""Global warning logging function"""
|
541 |
+
if _global_logger:
|
542 |
+
_global_logger.warning(category, module, message, **kwargs)
|
543 |
+
|
544 |
+
|
545 |
+
def log_error(category: LogCategory, module: str, message: str, **kwargs):
|
546 |
+
"""Global error logging function"""
|
547 |
+
if _global_logger:
|
548 |
+
_global_logger.error(category, module, message, **kwargs)
|
549 |
+
|
550 |
+
|
551 |
+
def log_critical(category: LogCategory, module: str, message: str, **kwargs):
|
552 |
+
"""Global critical logging function"""
|
553 |
+
if _global_logger:
|
554 |
+
_global_logger.critical(category, module, message, **kwargs)
|
555 |
+
|
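A short wiring sketch for the logger (not part of the upload), using only the configuration keys read in VirtualISPLogger.__init__ above; the subscriber id, category, and field values are illustrative.

from core.logger import init_logger, log_info, LogCategory, LogFilter, LogLevel

logger = init_logger({
    'log_level': 'DEBUG',
    'log_to_file': False,        # console only for this sketch
    'max_memory_logs': 1000,
})
logger.start()

# Stream ERROR and above to a callback in real time.
errors_only = LogFilter()
errors_only.level_filter = LogLevel.ERROR
logger.add_subscriber('console-errors', lambda entry: print(entry.to_json()), errors_only)

log_info(LogCategory.DHCP, 'dhcp_server', 'Lease granted',
         client_id='aa:bb:cc:dd:ee:ff', source_ip='10.66.0.2')

recent = logger.get_logs(limit=10)   # newest first, from the in-memory ring buffer
logger.stop()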
core/nat_engine.py
ADDED
@@ -0,0 +1,516 @@
1 |
+
"""
|
2 |
+
NAT Engine Module
|
3 |
+
|
4 |
+
Implements Network Address Translation:
|
5 |
+
- Map (virtualIP, virtualPort) to (hostIP, hostPort)
|
6 |
+
- Maintain connection tracking table
|
7 |
+
- Handle port allocation and deallocation
|
8 |
+
- Support connection state tracking
|
9 |
+
"""
|
10 |
+
|
11 |
+
import time
|
12 |
+
import threading
|
13 |
+
import socket
|
14 |
+
import random
|
15 |
+
from typing import Dict, Optional, Tuple, Set
|
16 |
+
from dataclasses import dataclass
|
17 |
+
from enum import Enum
|
18 |
+
|
19 |
+
from .ip_parser import IPProtocol
|
20 |
+
|
21 |
+
|
22 |
+
class NATType(Enum):
|
23 |
+
SNAT = "SNAT" # Source NAT
|
24 |
+
DNAT = "DNAT" # Destination NAT
|
25 |
+
|
26 |
+
|
27 |
+
@dataclass
|
28 |
+
class NATSession:
|
29 |
+
"""Represents a NAT session"""
|
30 |
+
# Virtual (internal) endpoint
|
31 |
+
virtual_ip: str
|
32 |
+
virtual_port: int
|
33 |
+
|
34 |
+
# Real (external) endpoint
|
35 |
+
real_ip: str
|
36 |
+
real_port: int
|
37 |
+
|
38 |
+
# Host (translated) endpoint
|
39 |
+
host_ip: str
|
40 |
+
host_port: int
|
41 |
+
|
42 |
+
# Session metadata
|
43 |
+
protocol: str # TCP or UDP
|
44 |
+
nat_type: NATType
|
45 |
+
created_time: float
|
46 |
+
last_activity: float
|
47 |
+
bytes_in: int = 0
|
48 |
+
bytes_out: int = 0
|
49 |
+
packets_in: int = 0
|
50 |
+
packets_out: int = 0
|
51 |
+
|
52 |
+
@property
|
53 |
+
def session_id(self) -> str:
|
54 |
+
"""Get unique session identifier"""
|
55 |
+
return f"{self.virtual_ip}:{self.virtual_port}-{self.real_ip}:{self.real_port}-{self.protocol}"
|
56 |
+
|
57 |
+
@property
|
58 |
+
def is_expired(self) -> bool:
|
59 |
+
"""Check if session has expired"""
|
60 |
+
timeout = 300 if self.protocol == 'TCP' else 60 # 5 min for TCP, 1 min for UDP
|
61 |
+
return time.time() - self.last_activity > timeout
|
62 |
+
|
63 |
+
@property
|
64 |
+
def duration(self) -> float:
|
65 |
+
"""Get session duration in seconds"""
|
66 |
+
return time.time() - self.created_time
|
67 |
+
|
68 |
+
def update_activity(self, bytes_transferred: int = 0, direction: str = 'out'):
|
69 |
+
"""Update session activity"""
|
70 |
+
self.last_activity = time.time()
|
71 |
+
|
72 |
+
if direction == 'out':
|
73 |
+
self.bytes_out += bytes_transferred
|
74 |
+
self.packets_out += 1
|
75 |
+
else:
|
76 |
+
self.bytes_in += bytes_transferred
|
77 |
+
self.packets_in += 1
|
78 |
+
|
79 |
+
|
80 |
+
class PortPool:
|
81 |
+
"""Manages available ports for NAT"""
|
82 |
+
|
83 |
+
def __init__(self, start_port: int = 10000, end_port: int = 65535):
|
84 |
+
self.start_port = start_port
|
85 |
+
self.end_port = end_port
|
86 |
+
self.available_ports: Set[int] = set(range(start_port, end_port + 1))
|
87 |
+
self.allocated_ports: Dict[int, str] = {} # port -> session_id
|
88 |
+
self.lock = threading.Lock()
|
89 |
+
|
90 |
+
def allocate_port(self, session_id: str) -> Optional[int]:
|
91 |
+
"""Allocate a port for a session"""
|
92 |
+
with self.lock:
|
93 |
+
if not self.available_ports:
|
94 |
+
return None
|
95 |
+
|
96 |
+
# Try to get a random port to distribute load
|
97 |
+
port = random.choice(list(self.available_ports))
|
98 |
+
self.available_ports.remove(port)
|
99 |
+
self.allocated_ports[port] = session_id
|
100 |
+
|
101 |
+
return port
|
102 |
+
|
103 |
+
def release_port(self, port: int) -> bool:
|
104 |
+
"""Release a port back to the pool"""
|
105 |
+
with self.lock:
|
106 |
+
if port in self.allocated_ports:
|
107 |
+
del self.allocated_ports[port]
|
108 |
+
if self.start_port <= port <= self.end_port:
|
109 |
+
self.available_ports.add(port)
|
110 |
+
return True
|
111 |
+
return False
|
112 |
+
|
113 |
+
def get_session_for_port(self, port: int) -> Optional[str]:
|
114 |
+
"""Get session ID for a port"""
|
115 |
+
with self.lock:
|
116 |
+
return self.allocated_ports.get(port)
|
117 |
+
|
118 |
+
def get_stats(self) -> Dict:
|
119 |
+
"""Get port pool statistics"""
|
120 |
+
with self.lock:
|
121 |
+
return {
|
122 |
+
'total_ports': self.end_port - self.start_port + 1,
|
123 |
+
'available_ports': len(self.available_ports),
|
124 |
+
'allocated_ports': len(self.allocated_ports),
|
125 |
+
'utilization': len(self.allocated_ports) / (self.end_port - self.start_port + 1)
|
126 |
+
}
|
127 |
+
|
128 |
+
|
129 |
+
class NATEngine:
|
130 |
+
"""Network Address Translation engine"""
|
131 |
+
|
132 |
+
def __init__(self, config: Dict):
|
133 |
+
self.config = config
|
134 |
+
self.sessions: Dict[str, NATSession] = {} # session_id -> session
|
135 |
+
self.virtual_to_session: Dict[Tuple[str, int, str], str] = {} # (vip, vport, proto) -> session_id
|
136 |
+
self.host_to_session: Dict[Tuple[str, int, str], str] = {} # (hip, hport, proto) -> session_id
|
137 |
+
self.lock = threading.RLock()  # reentrant: _remove_session() runs while callers already hold the lock
|
138 |
+
|
139 |
+
# Port pool for outbound connections
|
140 |
+
self.port_pool = PortPool(
|
141 |
+
config.get('port_range_start', 10000),
|
142 |
+
config.get('port_range_end', 65535)
|
143 |
+
)
|
144 |
+
|
145 |
+
# Host IP for outbound connections
|
146 |
+
self.host_ip = config.get('host_ip', self._get_default_host_ip())
|
147 |
+
|
148 |
+
# Session timeout
|
149 |
+
self.session_timeout = config.get('session_timeout', 300)
|
150 |
+
|
151 |
+
# Statistics
|
152 |
+
self.stats = {
|
153 |
+
'total_sessions': 0,
|
154 |
+
'active_sessions': 0,
|
155 |
+
'expired_sessions': 0,
|
156 |
+
'port_exhaustion_events': 0,
|
157 |
+
'bytes_translated': 0,
|
158 |
+
'packets_translated': 0
|
159 |
+
}
|
160 |
+
|
161 |
+
# Cleanup thread
|
162 |
+
self.running = False
|
163 |
+
self.cleanup_thread = None
|
164 |
+
|
165 |
+
def _get_default_host_ip(self) -> str:
|
166 |
+
"""Get default host IP address"""
|
167 |
+
try:
|
168 |
+
# Connect to a remote address to determine local IP
|
169 |
+
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
|
170 |
+
s.connect(('8.8.8.8', 80))
|
171 |
+
return s.getsockname()[0]
|
172 |
+
except Exception:
|
173 |
+
return '127.0.0.1'
|
174 |
+
|
175 |
+
def _cleanup_expired_sessions(self):
|
176 |
+
"""Clean up expired sessions"""
|
177 |
+
current_time = time.time()
|
178 |
+
expired_sessions = []
|
179 |
+
|
180 |
+
with self.lock:
|
181 |
+
for session_id, session in self.sessions.items():
|
182 |
+
if session.is_expired:
|
183 |
+
expired_sessions.append(session_id)
|
184 |
+
|
185 |
+
for session_id in expired_sessions:
|
186 |
+
self._remove_session(session_id)
|
187 |
+
self.stats['expired_sessions'] += 1
|
188 |
+
|
189 |
+
def _remove_session(self, session_id: str):
|
190 |
+
"""Remove a session and clean up resources"""
|
191 |
+
with self.lock:
|
192 |
+
if session_id not in self.sessions:
|
193 |
+
return
|
194 |
+
|
195 |
+
session = self.sessions[session_id]
|
196 |
+
|
197 |
+
# Remove from lookup tables
|
198 |
+
virtual_key = (session.virtual_ip, session.virtual_port, session.protocol)
|
199 |
+
if virtual_key in self.virtual_to_session:
|
200 |
+
del self.virtual_to_session[virtual_key]
|
201 |
+
|
202 |
+
host_key = (session.host_ip, session.host_port, session.protocol)
|
203 |
+
if host_key in self.host_to_session:
|
204 |
+
del self.host_to_session[host_key]
|
205 |
+
|
206 |
+
# Release port
|
207 |
+
self.port_pool.release_port(session.host_port)
|
208 |
+
|
209 |
+
# Remove session
|
210 |
+
del self.sessions[session_id]
|
211 |
+
|
212 |
+
self.stats['active_sessions'] = len(self.sessions)
|
213 |
+
|
214 |
+
def create_outbound_session(self, virtual_ip: str, virtual_port: int,
|
215 |
+
real_ip: str, real_port: int, protocol: str) -> Optional[NATSession]:
|
216 |
+
"""Create NAT session for outbound connection"""
|
217 |
+
# Allocate host port
|
218 |
+
session_id = f"{virtual_ip}:{virtual_port}-{real_ip}:{real_port}-{protocol}"
|
219 |
+
host_port = self.port_pool.allocate_port(session_id)
|
220 |
+
|
221 |
+
if host_port is None:
|
222 |
+
self.stats['port_exhaustion_events'] += 1
|
223 |
+
return None
|
224 |
+
|
225 |
+
# Create session
|
226 |
+
session = NATSession(
|
227 |
+
virtual_ip=virtual_ip,
|
228 |
+
virtual_port=virtual_port,
|
229 |
+
real_ip=real_ip,
|
230 |
+
real_port=real_port,
|
231 |
+
host_ip=self.host_ip,
|
232 |
+
host_port=host_port,
|
233 |
+
protocol=protocol,
|
234 |
+
nat_type=NATType.SNAT,
|
235 |
+
created_time=time.time(),
|
236 |
+
last_activity=time.time()
|
237 |
+
)
|
238 |
+
|
239 |
+
with self.lock:
|
240 |
+
self.sessions[session_id] = session
|
241 |
+
|
242 |
+
# Add to lookup tables
|
243 |
+
virtual_key = (virtual_ip, virtual_port, protocol)
|
244 |
+
self.virtual_to_session[virtual_key] = session_id
|
245 |
+
|
246 |
+
host_key = (self.host_ip, host_port, protocol)
|
247 |
+
self.host_to_session[host_key] = session_id
|
248 |
+
|
249 |
+
self.stats['total_sessions'] += 1
|
250 |
+
self.stats['active_sessions'] = len(self.sessions)
|
251 |
+
|
252 |
+
return session
|
253 |
+
|
254 |
+
def translate_outbound(self, virtual_ip: str, virtual_port: int,
|
255 |
+
real_ip: str, real_port: int, protocol: str) -> Optional[Tuple[str, int]]:
|
256 |
+
"""Translate outbound packet (virtual -> host)"""
|
257 |
+
virtual_key = (virtual_ip, virtual_port, protocol)
|
258 |
+
|
259 |
+
with self.lock:
|
260 |
+
session_id = self.virtual_to_session.get(virtual_key)
|
261 |
+
|
262 |
+
if session_id:
|
263 |
+
session = self.sessions[session_id]
|
264 |
+
session.update_activity(direction='out')
|
265 |
+
return (session.host_ip, session.host_port)
|
266 |
+
else:
|
267 |
+
# Create new session
|
268 |
+
session = self.create_outbound_session(virtual_ip, virtual_port, real_ip, real_port, protocol)
|
269 |
+
if session:
|
270 |
+
return (session.host_ip, session.host_port)
|
271 |
+
|
272 |
+
return None
|
273 |
+
|
274 |
+
def translate_inbound(self, host_ip: str, host_port: int, protocol: str) -> Optional[Tuple[str, int]]:
|
275 |
+
"""Translate inbound packet (host -> virtual)"""
|
276 |
+
host_key = (host_ip, host_port, protocol)
|
277 |
+
|
278 |
+
with self.lock:
|
279 |
+
session_id = self.host_to_session.get(host_key)
|
280 |
+
|
281 |
+
if session_id and session_id in self.sessions:
|
282 |
+
session = self.sessions[session_id]
|
283 |
+
session.update_activity(direction='in')
|
284 |
+
return (session.virtual_ip, session.virtual_port)
|
285 |
+
|
286 |
+
return None
|
287 |
+
|
288 |
+
def get_session_by_virtual(self, virtual_ip: str, virtual_port: int, protocol: str) -> Optional[NATSession]:
|
289 |
+
"""Get session by virtual endpoint"""
|
290 |
+
virtual_key = (virtual_ip, virtual_port, protocol)
|
291 |
+
|
292 |
+
with self.lock:
|
293 |
+
session_id = self.virtual_to_session.get(virtual_key)
|
294 |
+
if session_id and session_id in self.sessions:
|
295 |
+
return self.sessions[session_id]
|
296 |
+
|
297 |
+
return None
|
298 |
+
|
299 |
+
def get_session_by_host(self, host_ip: str, host_port: int, protocol: str) -> Optional[NATSession]:
|
300 |
+
"""Get session by host endpoint"""
|
301 |
+
host_key = (host_ip, host_port, protocol)
|
302 |
+
|
303 |
+
with self.lock:
|
304 |
+
session_id = self.host_to_session.get(host_key)
|
305 |
+
if session_id and session_id in self.sessions:
|
306 |
+
return self.sessions[session_id]
|
307 |
+
|
308 |
+
return None
|
309 |
+
|
310 |
+
def close_session(self, session_id: str) -> bool:
|
311 |
+
"""Manually close a session"""
|
312 |
+
with self.lock:
|
313 |
+
if session_id in self.sessions:
|
314 |
+
self._remove_session(session_id)
|
315 |
+
return True
|
316 |
+
return False
|
317 |
+
|
318 |
+
def close_session_by_virtual(self, virtual_ip: str, virtual_port: int, protocol: str) -> bool:
|
319 |
+
"""Close session by virtual endpoint"""
|
320 |
+
virtual_key = (virtual_ip, virtual_port, protocol)
|
321 |
+
|
322 |
+
with self.lock:
|
323 |
+
session_id = self.virtual_to_session.get(virtual_key)
|
324 |
+
if session_id:
|
325 |
+
self._remove_session(session_id)
|
326 |
+
return True
|
327 |
+
return False
|
328 |
+
|
329 |
+
def get_sessions(self) -> Dict[str, Dict]:
|
330 |
+
"""Get all active sessions"""
|
331 |
+
with self.lock:
|
332 |
+
return {
|
333 |
+
session_id: {
|
334 |
+
'virtual_ip': session.virtual_ip,
|
335 |
+
'virtual_port': session.virtual_port,
|
336 |
+
'real_ip': session.real_ip,
|
337 |
+
'real_port': session.real_port,
|
338 |
+
'host_ip': session.host_ip,
|
339 |
+
'host_port': session.host_port,
|
340 |
+
'protocol': session.protocol,
|
341 |
+
'nat_type': session.nat_type.value,
|
342 |
+
'created_time': session.created_time,
|
343 |
+
'last_activity': session.last_activity,
|
344 |
+
'duration': session.duration,
|
345 |
+
'bytes_in': session.bytes_in,
|
346 |
+
'bytes_out': session.bytes_out,
|
347 |
+
'packets_in': session.packets_in,
|
348 |
+
'packets_out': session.packets_out,
|
349 |
+
'is_expired': session.is_expired
|
350 |
+
}
|
351 |
+
for session_id, session in self.sessions.items()
|
352 |
+
}
|
353 |
+
|
354 |
+
def get_stats(self) -> Dict:
|
355 |
+
"""Get NAT statistics"""
|
356 |
+
port_stats = self.port_pool.get_stats()
|
357 |
+
|
358 |
+
with self.lock:
|
359 |
+
current_stats = self.stats.copy()
|
360 |
+
current_stats['active_sessions'] = len(self.sessions)
|
361 |
+
current_stats.update(port_stats)
|
362 |
+
|
363 |
+
return current_stats
|
364 |
+
|
365 |
+
def update_packet_stats(self, bytes_count: int):
|
366 |
+
"""Update packet statistics"""
|
367 |
+
self.stats['bytes_translated'] += bytes_count
|
368 |
+
self.stats['packets_translated'] += 1
|
369 |
+
|
370 |
+
def _cleanup_loop(self):
|
371 |
+
"""Background cleanup loop"""
|
372 |
+
while self.running:
|
373 |
+
try:
|
374 |
+
self._cleanup_expired_sessions()
|
375 |
+
time.sleep(30) # Cleanup every 30 seconds
|
376 |
+
except Exception as e:
|
377 |
+
print(f"NAT cleanup error: {e}")
|
378 |
+
time.sleep(5)
|
379 |
+
|
380 |
+
def start(self):
|
381 |
+
"""Start NAT engine"""
|
382 |
+
self.running = True
|
383 |
+
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
|
384 |
+
self.cleanup_thread.start()
|
385 |
+
print(f"NAT engine started - Host IP: {self.host_ip}, Port range: {self.port_pool.start_port}-{self.port_pool.end_port}")
|
386 |
+
|
387 |
+
def stop(self):
|
388 |
+
"""Stop NAT engine"""
|
389 |
+
self.running = False
|
390 |
+
if self.cleanup_thread:
|
391 |
+
self.cleanup_thread.join()
|
392 |
+
|
393 |
+
# Close all sessions
|
394 |
+
with self.lock:
|
395 |
+
session_ids = list(self.sessions.keys())
|
396 |
+
for session_id in session_ids:
|
397 |
+
self._remove_session(session_id)
|
398 |
+
|
399 |
+
print("NAT engine stopped")
|
400 |
+
|
401 |
+
def reset_stats(self):
|
402 |
+
"""Reset statistics"""
|
403 |
+
self.stats = {
|
404 |
+
'total_sessions': 0,
|
405 |
+
'active_sessions': len(self.sessions),
|
406 |
+
'expired_sessions': 0,
|
407 |
+
'port_exhaustion_events': 0,
|
408 |
+
'bytes_translated': 0,
|
409 |
+
'packets_translated': 0
|
410 |
+
}
|
411 |
+
|
412 |
+
|
413 |
+
class NATRule:
|
414 |
+
"""Represents a NAT rule for DNAT (port forwarding)"""
|
415 |
+
|
416 |
+
def __init__(self, external_port: int, internal_ip: str, internal_port: int,
|
417 |
+
protocol: str = 'TCP', enabled: bool = True):
|
418 |
+
self.external_port = external_port
|
419 |
+
self.internal_ip = internal_ip
|
420 |
+
self.internal_port = internal_port
|
421 |
+
self.protocol = protocol.upper()
|
422 |
+
self.enabled = enabled
|
423 |
+
self.created_time = time.time()
|
424 |
+
self.hit_count = 0
|
425 |
+
self.last_hit = None
|
426 |
+
|
427 |
+
def matches(self, port: int, protocol: str) -> bool:
|
428 |
+
"""Check if rule matches the given port and protocol"""
|
429 |
+
return (self.enabled and
|
430 |
+
self.external_port == port and
|
431 |
+
self.protocol == protocol.upper())
|
432 |
+
|
433 |
+
def record_hit(self):
|
434 |
+
"""Record a rule hit"""
|
435 |
+
self.hit_count += 1
|
436 |
+
self.last_hit = time.time()
|
437 |
+
|
438 |
+
def to_dict(self) -> Dict:
|
439 |
+
"""Convert rule to dictionary"""
|
440 |
+
return {
|
441 |
+
'external_port': self.external_port,
|
442 |
+
'internal_ip': self.internal_ip,
|
443 |
+
'internal_port': self.internal_port,
|
444 |
+
'protocol': self.protocol,
|
445 |
+
'enabled': self.enabled,
|
446 |
+
'created_time': self.created_time,
|
447 |
+
'hit_count': self.hit_count,
|
448 |
+
'last_hit': self.last_hit
|
449 |
+
}
|
450 |
+
|
451 |
+
|
452 |
+
class DNATEngine:
|
453 |
+
"""Destination NAT engine for port forwarding"""
|
454 |
+
|
455 |
+
def __init__(self):
|
456 |
+
self.rules: Dict[str, NATRule] = {} # rule_id -> rule
|
457 |
+
self.lock = threading.Lock()
|
458 |
+
|
459 |
+
def add_rule(self, rule_id: str, external_port: int, internal_ip: str,
|
460 |
+
internal_port: int, protocol: str = 'TCP') -> bool:
|
461 |
+
"""Add DNAT rule"""
|
462 |
+
with self.lock:
|
463 |
+
if rule_id in self.rules:
|
464 |
+
return False
|
465 |
+
|
466 |
+
rule = NATRule(external_port, internal_ip, internal_port, protocol)
|
467 |
+
self.rules[rule_id] = rule
|
468 |
+
return True
|
469 |
+
|
470 |
+
def remove_rule(self, rule_id: str) -> bool:
|
471 |
+
"""Remove DNAT rule"""
|
472 |
+
with self.lock:
|
473 |
+
if rule_id in self.rules:
|
474 |
+
del self.rules[rule_id]
|
475 |
+
return True
|
476 |
+
return False
|
477 |
+
|
478 |
+
def enable_rule(self, rule_id: str) -> bool:
|
479 |
+
"""Enable DNAT rule"""
|
480 |
+
with self.lock:
|
481 |
+
if rule_id in self.rules:
|
482 |
+
self.rules[rule_id].enabled = True
|
483 |
+
return True
|
484 |
+
return False
|
485 |
+
|
486 |
+
def disable_rule(self, rule_id: str) -> bool:
|
487 |
+
"""Disable DNAT rule"""
|
488 |
+
with self.lock:
|
489 |
+
if rule_id in self.rules:
|
490 |
+
self.rules[rule_id].enabled = False
|
491 |
+
return True
|
492 |
+
return False
|
493 |
+
|
494 |
+
def translate_inbound_dnat(self, external_port: int, protocol: str) -> Optional[Tuple[str, int]]:
|
495 |
+
"""Translate inbound packet using DNAT rules"""
|
496 |
+
with self.lock:
|
497 |
+
for rule in self.rules.values():
|
498 |
+
if rule.matches(external_port, protocol):
|
499 |
+
rule.record_hit()
|
500 |
+
return (rule.internal_ip, rule.internal_port)
|
501 |
+
|
502 |
+
return None
|
503 |
+
|
504 |
+
def get_rules(self) -> Dict[str, Dict]:
|
505 |
+
"""Get all DNAT rules"""
|
506 |
+
with self.lock:
|
507 |
+
return {
|
508 |
+
rule_id: rule.to_dict()
|
509 |
+
for rule_id, rule in self.rules.items()
|
510 |
+
}
|
511 |
+
|
512 |
+
def clear_rules(self):
|
513 |
+
"""Clear all DNAT rules"""
|
514 |
+
with self.lock:
|
515 |
+
self.rules.clear()
|
516 |
+
|
core/openvpn_manager.py
ADDED
@@ -0,0 +1,658 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
OpenVPN Manager Module
|
3 |
+
|
4 |
+
Manages OpenVPN server integration with the Virtual ISP Stack
|
5 |
+
"""
|
6 |
+
|
7 |
+
import os
|
8 |
+
import json
|
9 |
+
import subprocess
|
10 |
+
import threading
|
11 |
+
import time
|
12 |
+
import logging
|
13 |
+
from typing import Dict, List, Optional, Any
|
14 |
+
from dataclasses import dataclass, asdict
|
15 |
+
import ipaddress
|
16 |
+
|
17 |
+
logger = logging.getLogger(__name__)
|
18 |
+
|
19 |
+
@dataclass
|
20 |
+
class VPNClient:
|
21 |
+
"""Represents a connected VPN client"""
|
22 |
+
client_id: str
|
23 |
+
common_name: str
|
24 |
+
ip_address: str
|
25 |
+
connected_at: float
|
26 |
+
bytes_received: int = 0
|
27 |
+
bytes_sent: int = 0
|
28 |
+
status: str = "connected"
|
29 |
+
routed_through_vpn: bool = False
|
30 |
+
|
31 |
+
@dataclass
|
32 |
+
class VPNServerStatus:
|
33 |
+
"""Represents VPN server status"""
|
34 |
+
is_running: bool
|
35 |
+
connected_clients: int
|
36 |
+
total_bytes_received: int
|
37 |
+
total_bytes_sent: int
|
38 |
+
uptime: float
|
39 |
+
server_ip: str
|
40 |
+
server_port: int
|
41 |
+
|
42 |
+
class OpenVPNManager:
|
43 |
+
"""Manages OpenVPN server and client connections with traffic routing"""
|
44 |
+
|
45 |
+
def __init__(self, config: Dict[str, Any]):
|
46 |
+
self.config = config
|
47 |
+
self.server_config_path = "/etc/openvpn/server/server.conf"
|
48 |
+
self.status_log_path = "/var/log/openvpn/openvpn-status.log"
|
49 |
+
self.clients: Dict[str, VPNClient] = {}
|
50 |
+
self.server_process = None
|
51 |
+
self.is_running = False
|
52 |
+
self.start_time = None
|
53 |
+
|
54 |
+
# VPN network configuration
|
55 |
+
self.vpn_network = ipaddress.IPv4Network("10.8.0.0/24")
|
56 |
+
self.vpn_server_ip = "10.8.0.1"
|
57 |
+
self.vpn_port = 1194
|
58 |
+
|
59 |
+
# Integration with ISP stack
|
60 |
+
self.dhcp_server = None
|
61 |
+
self.nat_engine = None
|
62 |
+
self.firewall = None
|
63 |
+
self.router = None
|
64 |
+
self.traffic_router = None # New traffic router component
|
65 |
+
|
66 |
+
# Status monitoring thread
|
67 |
+
self.monitor_thread = None
|
68 |
+
self.monitor_running = False
|
69 |
+
|
70 |
+
# Client configuration storage
|
71 |
+
self.config_storage_path = "/tmp/vpn_client_configs"
|
72 |
+
os.makedirs(self.config_storage_path, exist_ok=True)
|
73 |
+
|
74 |
+
def set_isp_components(self, dhcp_server=None, nat_engine=None, firewall=None, router=None, traffic_router=None):
|
75 |
+
"""Set references to ISP stack components for integration"""
|
76 |
+
self.dhcp_server = dhcp_server
|
77 |
+
self.nat_engine = nat_engine
|
78 |
+
self.firewall = firewall
|
79 |
+
self.router = router
|
80 |
+
self.traffic_router = traffic_router
|
81 |
+
|
82 |
+
# Configure traffic router with other components
|
83 |
+
if self.traffic_router:
|
84 |
+
self.traffic_router.set_components(
|
85 |
+
nat_engine=nat_engine,
|
86 |
+
firewall=firewall,
|
87 |
+
dhcp_server=dhcp_server
|
88 |
+
)
|
89 |
+
|
90 |
+
def start_server(self) -> bool:
|
91 |
+
"""Start the OpenVPN server with traffic routing"""
|
92 |
+
try:
|
93 |
+
if self.is_running:
|
94 |
+
logger.warning("OpenVPN server is already running")
|
95 |
+
return True
|
96 |
+
|
97 |
+
# Ensure configuration exists
|
98 |
+
if not os.path.exists(self.server_config_path):
|
99 |
+
logger.error(f"OpenVPN server configuration not found: {self.server_config_path}")
|
100 |
+
return False
|
101 |
+
|
102 |
+
# Start traffic router first
|
103 |
+
if self.traffic_router and not self.traffic_router.is_running:
|
104 |
+
if not self.traffic_router.start():
|
105 |
+
logger.error("Failed to start traffic router")
|
106 |
+
return False
|
107 |
+
|
108 |
+
# Create log directory
|
109 |
+
os.makedirs("/var/log/openvpn", exist_ok=True)
|
110 |
+
|
111 |
+
# Start OpenVPN server
|
112 |
+
cmd = [
|
113 |
+
"/usr/bin/sudo", "openvpn",
|
114 |
+
"--config", self.server_config_path,
|
115 |
+
"--daemon", "openvpn-server",
|
116 |
+
"--log", "/var/log/openvpn/openvpn.log",
|
117 |
+
"--status", self.status_log_path, "10"
|
118 |
+
]
|
119 |
+
|
120 |
+
result = subprocess.run(cmd, capture_output=True, text=True)
|
121 |
+
|
122 |
+
if result.returncode == 0:
|
123 |
+
self.is_running = True
|
124 |
+
self.start_time = time.time()
|
125 |
+
logger.info("OpenVPN server started successfully with traffic routing")
|
126 |
+
|
127 |
+
# Start monitoring thread
|
128 |
+
self.start_monitoring()
|
129 |
+
|
130 |
+
# Configure firewall rules for VPN
|
131 |
+
self._configure_vpn_firewall()
|
132 |
+
|
133 |
+
# Configure NAT for VPN traffic
|
134 |
+
self._configure_vpn_nat()
|
135 |
+
|
136 |
+
return True
|
137 |
+
else:
|
138 |
+
logger.error(f"Failed to start OpenVPN server: {result.stderr}")
|
139 |
+
return False
|
140 |
+
|
141 |
+
except Exception as e:
|
142 |
+
logger.error(f"Error starting OpenVPN server: {e}")
|
143 |
+
return False
|
144 |
+
|
145 |
+
def stop_server(self) -> bool:
|
146 |
+
"""Stop the OpenVPN server and traffic routing"""
|
147 |
+
try:
|
148 |
+
if not self.is_running:
|
149 |
+
logger.warning("OpenVPN server is not running")
|
150 |
+
return True
|
151 |
+
|
152 |
+
# Stop monitoring
|
153 |
+
self.stop_monitoring()
|
154 |
+
|
155 |
+
# Remove all client routes before stopping
|
156 |
+
if self.traffic_router:
|
157 |
+
for client_id in list(self.clients.keys()):
|
158 |
+
self.traffic_router.remove_client_route(client_id)
|
159 |
+
|
160 |
+
# Kill OpenVPN process
|
161 |
+
result = subprocess.run(["/usr/bin/sudo", "pkill", "-f", "openvpn.*server"],
|
162 |
+
capture_output=True, text=True)
|
163 |
+
|
164 |
+
# Stop traffic router
|
165 |
+
if self.traffic_router and self.traffic_router.is_running:
|
166 |
+
self.traffic_router.stop()
|
167 |
+
|
168 |
+
self.is_running = False
|
169 |
+
self.start_time = None
|
170 |
+
self.clients.clear()
|
171 |
+
|
172 |
+
logger.info("OpenVPN server and traffic routing stopped")
|
173 |
+
return True
|
174 |
+
|
175 |
+
except Exception as e:
|
176 |
+
logger.error(f"Error stopping OpenVPN server: {e}")
|
177 |
+
return False
|
178 |
+
|
179 |
+
def start_monitoring(self):
|
180 |
+
"""Start the client monitoring thread"""
|
181 |
+
if self.monitor_thread and self.monitor_thread.is_alive():
|
182 |
+
return
|
183 |
+
|
184 |
+
self.monitor_running = True
|
185 |
+
self.monitor_thread = threading.Thread(target=self._monitor_clients, daemon=True)
|
186 |
+
self.monitor_thread.start()
|
187 |
+
logger.info("Started OpenVPN client monitoring")
|
188 |
+
|
189 |
+
def stop_monitoring(self):
|
190 |
+
"""Stop the client monitoring thread"""
|
191 |
+
self.monitor_running = False
|
192 |
+
if self.monitor_thread:
|
193 |
+
self.monitor_thread.join(timeout=5)
|
194 |
+
logger.info("Stopped OpenVPN client monitoring")
|
195 |
+
|
196 |
+
def _monitor_clients(self):
|
197 |
+
"""Monitor connected VPN clients"""
|
198 |
+
while self.monitor_running:
|
199 |
+
try:
|
200 |
+
self._update_client_status()
|
201 |
+
time.sleep(10) # Update every 10 seconds
|
202 |
+
except Exception as e:
|
203 |
+
logger.error(f"Error monitoring VPN clients: {e}")
|
204 |
+
time.sleep(30) # Wait longer on error
|
205 |
+
|
206 |
+
def _update_client_status(self):
|
207 |
+
"""Update client status from OpenVPN status log and manage traffic routing"""
|
208 |
+
try:
|
209 |
+
if not os.path.exists(self.status_log_path):
|
210 |
+
return
|
211 |
+
|
212 |
+
with open(self.status_log_path, 'r') as f:
|
213 |
+
content = f.read()
|
214 |
+
|
215 |
+
# Parse OpenVPN status log
|
216 |
+
lines = content.split('\n')
|
217 |
+
client_section = False
|
218 |
+
routing_section = False
|
219 |
+
|
220 |
+
current_clients = {}
|
221 |
+
previous_clients = set(self.clients.keys())
|
222 |
+
|
223 |
+
for line in lines:
|
224 |
+
line = line.strip()
|
225 |
+
|
226 |
+
if line.startswith("OpenVPN CLIENT LIST"):
|
227 |
+
client_section = True
|
228 |
+
continue
|
229 |
+
elif line.startswith("ROUTING TABLE"):
|
230 |
+
client_section = False
|
231 |
+
routing_section = True
|
232 |
+
continue
|
233 |
+
elif line.startswith("GLOBAL STATS"):
|
234 |
+
routing_section = False
|
235 |
+
continue
|
236 |
+
|
237 |
+
if client_section and line and not line.startswith("Updated,"):
|
238 |
+
# Parse client line: Common Name,Real Address,Bytes Received,Bytes Sent,Connected Since
|
239 |
+
parts = line.split(',')
|
240 |
+
if len(parts) >= 5:
|
241 |
+
common_name = parts[0]
|
242 |
+
real_address = parts[1]
|
243 |
+
bytes_received = int(parts[2]) if parts[2].isdigit() else 0
|
244 |
+
bytes_sent = int(parts[3]) if parts[3].isdigit() else 0
|
245 |
+
connected_since = parts[4]
|
246 |
+
|
247 |
+
# Get VPN IP from routing table (will be parsed later)
|
248 |
+
vpn_ip = "unknown"
|
249 |
+
|
250 |
+
# Check if this is an existing client
|
251 |
+
routed_through_vpn = False
|
252 |
+
if common_name in self.clients:
|
253 |
+
routed_through_vpn = self.clients[common_name].routed_through_vpn
|
254 |
+
|
255 |
+
client = VPNClient(
|
256 |
+
client_id=common_name,
|
257 |
+
common_name=common_name,
|
258 |
+
ip_address=vpn_ip,
|
259 |
+
connected_at=time.time(), # Simplified for now
|
260 |
+
bytes_received=bytes_received,
|
261 |
+
bytes_sent=bytes_sent,
|
262 |
+
routed_through_vpn=routed_through_vpn
|
263 |
+
)
|
264 |
+
current_clients[common_name] = client
|
265 |
+
|
266 |
+
elif routing_section and line and not line.startswith("Virtual Address,"):
|
267 |
+
# Parse routing line: Virtual Address,Common Name,Real Address,Last Ref
|
268 |
+
parts = line.split(',')
|
269 |
+
if len(parts) >= 2:
|
270 |
+
vpn_ip = parts[0]
|
271 |
+
common_name = parts[1]
|
272 |
+
|
273 |
+
if common_name in current_clients:
|
274 |
+
current_clients[common_name].ip_address = vpn_ip
|
275 |
+
|
276 |
+
# Handle new clients - set up traffic routing
|
277 |
+
new_clients = set(current_clients.keys()) - previous_clients
|
278 |
+
for client_id in new_clients:
|
279 |
+
client = current_clients[client_id]
|
280 |
+
if client.ip_address != "unknown" and self.traffic_router:
|
281 |
+
# Add client route for free data access
|
282 |
+
if self.traffic_router.add_client_route(client_id, client.ip_address):
|
283 |
+
client.routed_through_vpn = True
|
284 |
+
logger.info(f"Added traffic routing for new VPN client: {client_id} ({client.ip_address})")
|
285 |
+
|
286 |
+
# Handle disconnected clients - clean up routing
|
287 |
+
disconnected_clients = previous_clients - set(current_clients.keys())
|
288 |
+
for client_id in disconnected_clients:
|
289 |
+
if self.traffic_router:
|
290 |
+
self.traffic_router.remove_client_route(client_id)
|
291 |
+
logger.info(f"Removed traffic routing for disconnected VPN client: {client_id}")
|
292 |
+
|
293 |
+
# Update clients dictionary
|
294 |
+
self.clients = current_clients
|
295 |
+
|
296 |
+
# Integrate with DHCP server if available
|
297 |
+
if self.dhcp_server:
|
298 |
+
self._sync_with_dhcp()
|
299 |
+
|
300 |
+
except Exception as e:
|
301 |
+
logger.error(f"Error updating client status: {e}")
|
302 |
+
|
303 |
+
def _sync_with_dhcp(self):
|
304 |
+
"""Sync VPN clients with DHCP server"""
|
305 |
+
try:
|
306 |
+
for client in self.clients.values():
|
307 |
+
if client.ip_address != "unknown":
|
308 |
+
# Register VPN client IP with DHCP server
|
309 |
+
# This allows the ISP stack to track VPN clients
|
310 |
+
if hasattr(self.dhcp_server, 'register_static_lease'):
|
311 |
+
self.dhcp_server.register_static_lease(
|
312 |
+
client.common_name,
|
313 |
+
client.ip_address,
|
314 |
+
"VPN Client"
|
315 |
+
)
|
316 |
+
except Exception as e:
|
317 |
+
logger.error(f"Error syncing with DHCP: {e}")
|
318 |
+
|
319 |
+
def _configure_vpn_firewall(self):
|
320 |
+
"""Configure firewall rules for VPN traffic"""
|
321 |
+
try:
|
322 |
+
if not self.firewall:
|
323 |
+
return
|
324 |
+
|
325 |
+
# Add firewall rules for VPN
|
326 |
+
vpn_rules = [
|
327 |
+
{
|
328 |
+
"rule_id": "allow_openvpn",
|
329 |
+
"priority": 10,
|
330 |
+
"action": "ACCEPT",
|
331 |
+
"direction": "BOTH",
|
332 |
+
"dest_port": str(self.vpn_port),
|
333 |
+
"protocol": "UDP",
|
334 |
+
"description": "Allow OpenVPN traffic",
|
335 |
+
"enabled": True
|
336 |
+
},
|
337 |
+
{
|
338 |
+
"rule_id": "allow_vpn_network",
|
339 |
+
"priority": 11,
|
340 |
+
"action": "ACCEPT",
|
341 |
+
"direction": "BOTH",
|
342 |
+
"source_network": str(self.vpn_network),
|
343 |
+
"description": "Allow VPN client network traffic",
|
344 |
+
"enabled": True
|
345 |
+
}
|
346 |
+
]
|
347 |
+
|
348 |
+
for rule in vpn_rules:
|
349 |
+
if hasattr(self.firewall, 'add_rule'):
|
350 |
+
self.firewall.add_rule(rule)
|
351 |
+
|
352 |
+
logger.info("Configured firewall rules for VPN")
|
353 |
+
|
354 |
+
except Exception as e:
|
355 |
+
logger.error(f"Error configuring VPN firewall: {e}")
|
356 |
+
|
357 |
+
def _configure_vpn_nat(self):
|
358 |
+
"""Configure NAT for VPN traffic"""
|
359 |
+
try:
|
360 |
+
# NAT configuration will be handled by the external environment (e.g., HuggingFace Spaces setup)
|
361 |
+
# or by the underlying network infrastructure. We are removing direct iptables calls.
|
362 |
+
logger.info("Skipping direct iptables NAT configuration as per instructions.")
|
363 |
+
|
364 |
+
except Exception as e:
|
365 |
+
logger.error(f"Error configuring VPN NAT: {e}")
|
366 |
+
|
367 |
+
def get_server_status(self) -> VPNServerStatus:
|
368 |
+
"""Get current server status"""
|
369 |
+
total_bytes_received = sum(client.bytes_received for client in self.clients.values())
|
370 |
+
total_bytes_sent = sum(client.bytes_sent for client in self.clients.values())
|
371 |
+
uptime = time.time() - self.start_time if self.start_time else 0
|
372 |
+
|
373 |
+
return VPNServerStatus(
|
374 |
+
is_running=self.is_running,
|
375 |
+
connected_clients=len(self.clients),
|
376 |
+
total_bytes_received=total_bytes_received,
|
377 |
+
total_bytes_sent=total_bytes_sent,
|
378 |
+
uptime=uptime,
|
379 |
+
server_ip=self.vpn_server_ip,
|
380 |
+
server_port=self.vpn_port
|
381 |
+
)
|
382 |
+
|
383 |
+
def get_connected_clients(self) -> List[Dict[str, Any]]:
|
384 |
+
"""Get list of connected clients"""
|
385 |
+
return [asdict(client) for client in self.clients.values()]
|
386 |
+
|
387 |
+
def disconnect_client(self, client_id: str) -> bool:
|
388 |
+
"""Disconnect a specific client"""
|
389 |
+
try:
|
390 |
+
if client_id not in self.clients:
|
391 |
+
return False
|
392 |
+
|
393 |
+
# Send kill signal to specific client
|
394 |
+
# This requires OpenVPN management interface, simplified for now
|
395 |
+
logger.info(f"Disconnecting client: {client_id}")
|
396 |
+
|
397 |
+
# Remove from clients dict
|
398 |
+
del self.clients[client_id]
|
399 |
+
return True
|
400 |
+
|
401 |
+
except Exception as e:
|
402 |
+
logger.error(f"Error disconnecting client {client_id}: {e}")
|
403 |
+
return False
|
404 |
+
|
405 |
+
def generate_client_config(self, client_name: str, server_ip: str) -> str:
|
406 |
+
"""Generate client configuration file with embedded certificates"""
|
407 |
+
try:
|
408 |
+
# Read real CA certificate
|
409 |
+
ca_cert_path = "/etc/openvpn/server/ca.crt"
|
410 |
+
try:
|
411 |
+
with open(ca_cert_path, 'r') as f:
|
412 |
+
ca_cert = f.read()
|
413 |
+
except FileNotFoundError:
|
414 |
+
# Fallback to embedded certificate for development
|
415 |
+
ca_cert = """-----BEGIN CERTIFICATE-----
|
416 |
+
MIIDMzCCAhugAwIBAgIUNO765P4t/yD/PnIFTMVs0Q32TJYwDQYJKoZIhvcNAQEL
|
417 |
+
BQAwDjEMMAoGA1UEAwwDeWVzMB4XDTI1MDgwMjAxMjkzNVoXDTM1MDczMTAxMjkz
|
418 |
+
NVowDjEMMAoGA1UEAwwDeWVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
|
419 |
+
AQEAtwhMGXouHnHBRd2RhdrW8sOMgqt4wDXZC0J+4UMjOX6Y7t2O1Sgw/sWhwFPk
|
420 |
+
QF/cMoQIvsucklPogcnzzGtv9zDkAXyVyCC27UYbg8JfWZK3ZMrt6dfEmYf4KKXm
|
421 |
+
D6PLn9guxzBB63dhEWx/7fd6H9C/rK/u0rOh15DQRnfEI468cmXS5uNg8ke/73+y
|
422 |
+
Gzb6q7ZOFByBAwM0hW0lStBaIIcxouFrIK8B72O8H+6t10K1GvgiBhKvM3cc8dpN
|
423 |
+
y4qvRoN/o+eXarZG7G9dfm9OFgdd9LoXPTTbO+ftFPKOq4F41PnMd2Zcyk7P3GCr
|
424 |
+
3oK7NbISxZ5efLpy45lgSpqKBwIDAQABo4GIMIGFMB0GA1UdDgQWBBQIi0Er30cV
|
425 |
+
Qzi+U/LPV4Lf3yvGIzBJBgNVHSMEQjBAgBQIi0Er30cVQzi+U/LPV4Lf3yvGI6ES
|
426 |
+
pBAwDjEMMAoGA1UEAwwDeWVzghQ07vrk/i3/IP8+cgVMxWzRDfZMljAMBgNVHRME
|
427 |
+
BTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAHzfSFbi1G7WC
|
428 |
+
vMSOqSv4/jlBExnz/AlLUBHhgDomIdLK8Pb3tyCD5IYkmi0NT5x6DORcOV2ow1JZ
|
429 |
+
o4BL7OVV+fhz3VKXEpG+s3gq5j2m+raqLtu6QKBGg7SIUZ4MLjggvAcPjsK+n8sK
|
430 |
+
86sAUFVTccBxJlKBShAUPSNihyWwxB4PQFvwhefNQSoID1kAB2Fzf1beMX6Gp6Lj
|
431 |
+
ldI6e63lpYtIbp4+2F5SxJ/hGTUx+nWbOAHPvhBfhN6sEu9G1C5KPR0cm+xxOpZ9
|
432 |
+
lA7y4Dea7pyVybR/b7lFquE3TReXCoLx79UNNSv8erIlsy1jh9yXDnTCk8SN1dpO
|
433 |
+
YwJ9U0AHXA==
|
434 |
+
-----END CERTIFICATE-----"""
|
435 |
+
|
436 |
+
# Sample client certificate (in production, generate unique per client)
|
437 |
+
client_cert = f"""-----BEGIN CERTIFICATE-----
|
438 |
+
MIIDSzCCAjOgAwIBAgIU{client_name}1234567890abcdefghijklmnopqrstuvwxyz
|
439 |
+
ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890abcdefghijklmnopqrstuvwxyz1234567
|
440 |
+
890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12345
|
441 |
+
67890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz123
|
442 |
+
4567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1
|
443 |
+
234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxy
|
444 |
+
z1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuv
|
445 |
+
wxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrs
|
446 |
+
tuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmno
|
447 |
+
pqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghij
|
448 |
+
klmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcd
|
449 |
+
efghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567
|
450 |
+
890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxy
|
451 |
+
z1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnop
|
452 |
+
qrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcde
|
453 |
+
fghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12
|
454 |
+
34567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijk
|
455 |
+
lmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12
|
456 |
+
34567890abcdefghijklmnopqrstuvwxyz1234567890abc
|
457 |
+
defghijklmnopqrstuvwxyz1234567890abcdefg
|
458 |
+
hijklmnopqrstuvwxyz1234567890
|
459 |
+
abcdefghijklmnopqr
|
460 |
+
stuvwxyz12345
|
461 |
+
67890abc
|
462 |
+
def
|
463 |
+
-----END CERTIFICATE-----"""
|
464 |
+
|
465 |
+
# Sample client private key (in production, generate unique per client)
|
466 |
+
client_key = f"""-----BEGIN PRIVATE KEY-----
|
467 |
+
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC{client_name}1234567
|
468 |
+
890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz123456
|
469 |
+
7890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12345
|
470 |
+
67890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234
|
471 |
+
567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz123
|
472 |
+
4567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12
|
473 |
+
34567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1
|
474 |
+
234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxy
|
475 |
+
z1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuv
|
476 |
+
wxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrs
|
477 |
+
tuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmno
|
478 |
+
pqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghij
|
479 |
+
klmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcd
|
480 |
+
efghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567
|
481 |
+
890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxy
|
482 |
+
z1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnop
|
483 |
+
qrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcde
|
484 |
+
fghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12
|
485 |
+
34567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijk
|
486 |
+
lmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12
|
487 |
+
34567890abcdefghijklmnopqrstuvwxyz1234567890abc
|
488 |
+
defghijklmnopqrstuvwxyz1234567890abcdefg
|
489 |
+
hijklmnopqrstuvwxyz1234567890
|
490 |
+
abcdefghijklmnopqr
|
491 |
+
stuvwxyz12345
|
492 |
+
67890abc
|
493 |
+
defghijk
|
494 |
+
lmnopqr
|
495 |
+
stuv
|
496 |
+
-----END PRIVATE KEY-----"""
|
497 |
+
|
498 |
+
# Generate complete client configuration
|
499 |
+
client_config = f"""# OpenVPN Client Configuration for {client_name}
|
500 |
+
# Generated by Virtual ISP Stack
|
501 |
+
# Server: {server_ip}:{self.vpn_port}
|
502 |
+
|
503 |
+
client
|
504 |
+
dev tun
|
505 |
+
proto udp
|
506 |
+
remote {server_ip} {self.vpn_port}
|
507 |
+
resolv-retry infinite
|
508 |
+
nobind
|
509 |
+
persist-key
|
510 |
+
persist-tun
|
511 |
+
cipher AES-256-CBC
|
512 |
+
auth SHA256
|
513 |
+
verb 3
|
514 |
+
key-direction 1
|
515 |
+
redirect-gateway def1 bypass-dhcp
|
516 |
+
dhcp-option DNS 8.8.8.8
|
517 |
+
dhcp-option DNS 8.8.4.4
|
518 |
+
remote-cert-tls server
|
519 |
+
|
520 |
+
# Embedded CA Certificate
|
521 |
+
<ca>
|
522 |
+
{ca_cert}
|
523 |
+
</ca>
|
524 |
+
|
525 |
+
# Embedded Client Certificate
|
526 |
+
<cert>
|
527 |
+
{client_cert}
|
528 |
+
</cert>
|
529 |
+
|
530 |
+
# Embedded Client Private Key
|
531 |
+
<key>
|
532 |
+
{client_key}
|
533 |
+
</key>
|
534 |
+
|
535 |
+
# TLS Authentication Key (optional, for extra security)
|
536 |
+
# <tls-auth>
|
537 |
+
# -----BEGIN OpenVPN Static key V1-----
|
538 |
+
# [TLS-AUTH-KEY-CONTENT-WOULD-GO-HERE]
|
539 |
+
# -----END OpenVPN Static key V1-----
|
540 |
+
# </tls-auth>
|
541 |
+
"""
|
542 |
+
|
543 |
+
logger.info(f"Generated client configuration for {client_name}")
|
544 |
+
return client_config
|
545 |
+
|
546 |
+
except Exception as e:
|
547 |
+
logger.error(f"Error generating client config: {e}")
|
548 |
+
return ""
|
549 |
+
|
550 |
+
def save_client_config(self, client_name: str, config_content: str) -> bool:
|
551 |
+
"""Save client configuration to storage"""
|
552 |
+
try:
|
553 |
+
config_file_path = os.path.join(self.config_storage_path, f"{client_name}.ovpn")
|
554 |
+
with open(config_file_path, 'w') as f:
|
555 |
+
f.write(config_content)
|
556 |
+
|
557 |
+
logger.info(f"Saved client configuration for {client_name}")
|
558 |
+
return True
|
559 |
+
|
560 |
+
except Exception as e:
|
561 |
+
logger.error(f"Error saving client config for {client_name}: {e}")
|
562 |
+
return False
|
563 |
+
|
564 |
+
def load_client_config(self, client_name: str) -> str:
|
565 |
+
"""Load client configuration from storage"""
|
566 |
+
try:
|
567 |
+
config_file_path = os.path.join(self.config_storage_path, f"{client_name}.ovpn")
|
568 |
+
if not os.path.exists(config_file_path):
|
569 |
+
return ""
|
570 |
+
|
571 |
+
with open(config_file_path, 'r') as f:
|
572 |
+
config_content = f.read()
|
573 |
+
|
574 |
+
logger.info(f"Loaded client configuration for {client_name}")
|
575 |
+
return config_content
|
576 |
+
|
577 |
+
except Exception as e:
|
578 |
+
logger.error(f"Error loading client config for {client_name}: {e}")
|
579 |
+
return ""
|
580 |
+
|
581 |
+
def list_client_configs(self) -> List[str]:
|
582 |
+
"""List all stored client configurations"""
|
583 |
+
try:
|
584 |
+
config_files = []
|
585 |
+
if os.path.exists(self.config_storage_path):
|
586 |
+
for filename in os.listdir(self.config_storage_path):
|
587 |
+
if filename.endswith('.ovpn'):
|
588 |
+
client_name = filename[:-5] # Remove .ovpn extension
|
589 |
+
config_files.append(client_name)
|
590 |
+
|
591 |
+
return config_files
|
592 |
+
|
593 |
+
except Exception as e:
|
594 |
+
logger.error(f"Error listing client configs: {e}")
|
595 |
+
return []
|
596 |
+
|
597 |
+
def delete_client_config(self, client_name: str) -> bool:
|
598 |
+
"""Delete client configuration from storage"""
|
599 |
+
try:
|
600 |
+
config_file_path = os.path.join(self.config_storage_path, f"{client_name}.ovpn")
|
601 |
+
if os.path.exists(config_file_path):
|
602 |
+
os.remove(config_file_path)
|
603 |
+
logger.info(f"Deleted client configuration for {client_name}")
|
604 |
+
return True
|
605 |
+
else:
|
606 |
+
logger.warning(f"Client configuration for {client_name} not found")
|
607 |
+
return False
|
608 |
+
|
609 |
+
except Exception as e:
|
610 |
+
logger.error(f"Error deleting client config for {client_name}: {e}")
|
611 |
+
return False
|
612 |
+
|
613 |
+
def generate_and_save_client_config(self, client_name: str, server_ip: str) -> str:
|
614 |
+
"""Generate client configuration and save it to storage"""
|
615 |
+
try:
|
616 |
+
config_content = self.generate_client_config(client_name, server_ip)
|
617 |
+
if config_content:
|
618 |
+
if self.save_client_config(client_name, config_content):
|
619 |
+
return config_content
|
620 |
+
return ""
|
621 |
+
|
622 |
+
except Exception as e:
|
623 |
+
logger.error(f"Error generating and saving client config for {client_name}: {e}")
|
624 |
+
return ""
|
625 |
+
|
626 |
+
def get_statistics(self) -> Dict[str, Any]:
|
627 |
+
"""Get comprehensive VPN statistics"""
|
628 |
+
status = self.get_server_status()
|
629 |
+
|
630 |
+
return {
|
631 |
+
"server_status": asdict(status),
|
632 |
+
"connected_clients": self.get_connected_clients(),
|
633 |
+
"network_config": {
|
634 |
+
"vpn_network": str(self.vpn_network),
|
635 |
+
"server_ip": self.vpn_server_ip,
|
636 |
+
"server_port": self.vpn_port
|
637 |
+
},
|
638 |
+
"integration_status": {
|
639 |
+
"dhcp_integrated": self.dhcp_server is not None,
|
640 |
+
"nat_integrated": self.nat_engine is not None,
|
641 |
+
"firewall_integrated": self.firewall is not None,
|
642 |
+
"router_integrated": self.router is not None
|
643 |
+
}
|
644 |
+
}
|
645 |
+
|
646 |
+
# Global OpenVPN manager instance
|
647 |
+
openvpn_manager = None
|
648 |
+
|
649 |
+
def initialize_openvpn_manager(config: Dict[str, Any]) -> OpenVPNManager:
|
650 |
+
"""Initialize the OpenVPN manager"""
|
651 |
+
global openvpn_manager
|
652 |
+
openvpn_manager = OpenVPNManager(config)
|
653 |
+
return openvpn_manager
|
654 |
+
|
655 |
+
def get_openvpn_manager() -> Optional[OpenVPNManager]:
|
656 |
+
"""Get the global OpenVPN manager instance"""
|
657 |
+
return openvpn_manager
|
658 |
+
|
core/packet_bridge.py
ADDED
@@ -0,0 +1,664 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Packet Bridge Module
|
3 |
+
|
4 |
+
Handles communication with virtual clients:
|
5 |
+
- Accept packet streams over WebSocket/TCP
|
6 |
+
- Deliver response packets back to clients
|
7 |
+
- Frame processing (Ethernet → IPv4)
|
8 |
+
- Connection management
|
9 |
+
"""
|
10 |
+
|
11 |
+
import asyncio
|
12 |
+
import websockets
|
13 |
+
import socket
|
14 |
+
import threading
|
15 |
+
import time
|
16 |
+
import struct
|
17 |
+
from typing import Dict, List, Optional, Callable, Set, Any, Tuple
|
18 |
+
from dataclasses import dataclass
|
19 |
+
from enum import Enum
|
20 |
+
import json
|
21 |
+
import logging
|
22 |
+
|
23 |
+
from .ip_parser import IPParser, ParsedPacket
|
24 |
+
|
25 |
+
|
26 |
+
class BridgeType(Enum):
|
27 |
+
WEBSOCKET = "WEBSOCKET"
|
28 |
+
TCP_SOCKET = "TCP_SOCKET"
|
29 |
+
UDP_SOCKET = "UDP_SOCKET"
|
30 |
+
|
31 |
+
|
32 |
+
@dataclass
|
33 |
+
class ClientConnection:
|
34 |
+
"""Represents a client connection to the bridge"""
|
35 |
+
client_id: str
|
36 |
+
bridge_type: BridgeType
|
37 |
+
remote_address: str
|
38 |
+
remote_port: int
|
39 |
+
websocket: Optional[Any] = None # WebSocket connection
|
40 |
+
socket: Optional['socket.socket'] = None # TCP/UDP socket
|
41 |
+
connected_time: float = 0
|
42 |
+
last_activity: float = 0
|
43 |
+
packets_received: int = 0
|
44 |
+
packets_sent: int = 0
|
45 |
+
bytes_received: int = 0
|
46 |
+
bytes_sent: int = 0
|
47 |
+
is_active: bool = True
|
48 |
+
|
49 |
+
def __post_init__(self):
|
50 |
+
if self.connected_time == 0:
|
51 |
+
self.connected_time = time.time()
|
52 |
+
if self.last_activity == 0:
|
53 |
+
self.last_activity = time.time()
|
54 |
+
|
55 |
+
def update_activity(self, packet_count: int = 1, byte_count: int = 0, direction: str = 'received'):
|
56 |
+
"""Update connection activity"""
|
57 |
+
self.last_activity = time.time()
|
58 |
+
|
59 |
+
if direction == 'received':
|
60 |
+
self.packets_received += packet_count
|
61 |
+
self.bytes_received += byte_count
|
62 |
+
else:
|
63 |
+
self.packets_sent += packet_count
|
64 |
+
self.bytes_sent += byte_count
|
65 |
+
|
66 |
+
def to_dict(self) -> Dict:
|
67 |
+
"""Convert to dictionary"""
|
68 |
+
return {
|
69 |
+
'client_id': self.client_id,
|
70 |
+
'bridge_type': self.bridge_type.value,
|
71 |
+
'remote_address': self.remote_address,
|
72 |
+
'remote_port': self.remote_port,
|
73 |
+
'connected_time': self.connected_time,
|
74 |
+
'last_activity': self.last_activity,
|
75 |
+
'packets_received': self.packets_received,
|
76 |
+
'packets_sent': self.packets_sent,
|
77 |
+
'bytes_received': self.bytes_received,
|
78 |
+
'bytes_sent': self.bytes_sent,
|
79 |
+
'is_active': self.is_active,
|
80 |
+
'duration': time.time() - self.connected_time
|
81 |
+
}
|
82 |
+
|
83 |
+
|
84 |
+
class EthernetFrame:
|
85 |
+
"""Ethernet frame parser"""
|
86 |
+
|
87 |
+
def __init__(self):
|
88 |
+
self.dest_mac = b'\x00' * 6
|
89 |
+
self.src_mac = b'\x00' * 6
|
90 |
+
self.ethertype = 0x0800 # IPv4
|
91 |
+
self.payload = b''
|
92 |
+
|
93 |
+
@classmethod
|
94 |
+
def parse(cls, data: bytes) -> Optional['EthernetFrame']:
|
95 |
+
"""Parse Ethernet frame from raw bytes"""
|
96 |
+
if len(data) < 14: # Minimum Ethernet header size
|
97 |
+
return None
|
98 |
+
|
99 |
+
frame = cls()
|
100 |
+
frame.dest_mac = data[0:6]
|
101 |
+
frame.src_mac = data[6:12]
|
102 |
+
frame.ethertype = struct.unpack('!H', data[12:14])[0]
|
103 |
+
frame.payload = data[14:]
|
104 |
+
|
105 |
+
return frame
|
106 |
+
|
107 |
+
def build(self) -> bytes:
|
108 |
+
"""Build Ethernet frame as bytes"""
|
109 |
+
header = self.dest_mac + self.src_mac + struct.pack('!H', self.ethertype)
|
110 |
+
return header + self.payload
|
111 |
+
|
112 |
+
def is_ipv4(self) -> bool:
|
113 |
+
"""Check if frame contains IPv4 packet"""
|
114 |
+
return self.ethertype == 0x0800
|
115 |
+
|
116 |
+
def is_arp(self) -> bool:
|
117 |
+
"""Check if frame contains ARP packet"""
|
118 |
+
return self.ethertype == 0x0806
|
119 |
+
|
120 |
+
|
121 |
+
class PacketBridge:
|
122 |
+
"""Packet bridge implementation"""
|
123 |
+
|
124 |
+
def __init__(self, config: Dict):
|
125 |
+
self.config = config
|
126 |
+
self.clients: Dict[str, ClientConnection] = {}
|
127 |
+
self.packet_handlers: List[Callable[[ParsedPacket, str], Optional[bytes]]] = []
|
128 |
+
self.lock = threading.Lock()
|
129 |
+
|
130 |
+
# Configuration
|
131 |
+
self.websocket_host = config.get('websocket_host', '0.0.0.0')
|
132 |
+
self.websocket_port = config.get('websocket_port', 8765)
|
133 |
+
self.tcp_host = config.get('tcp_host', '0.0.0.0')
|
134 |
+
self.tcp_port = config.get('tcp_port', 8766)
|
135 |
+
self.max_clients = config.get('max_clients', 100)
|
136 |
+
self.client_timeout = config.get('client_timeout', 300)
|
137 |
+
|
138 |
+
# WebSocket server
|
139 |
+
self.websocket_server = None
|
140 |
+
self.tcp_server_socket = None
|
141 |
+
|
142 |
+
# Background tasks
|
143 |
+
self.running = False
|
144 |
+
self.websocket_task = None
|
145 |
+
self.tcp_task = None
|
146 |
+
self.cleanup_task = None
|
147 |
+
|
148 |
+
# Statistics
|
149 |
+
self.stats = {
|
150 |
+
'total_clients': 0,
|
151 |
+
'active_clients': 0,
|
152 |
+
'packets_processed': 0,
|
153 |
+
'packets_forwarded': 0,
|
154 |
+
'packets_dropped': 0,
|
155 |
+
'bytes_processed': 0,
|
156 |
+
'websocket_connections': 0,
|
157 |
+
'tcp_connections': 0,
|
158 |
+
'connection_errors': 0
|
159 |
+
}
|
160 |
+
|
161 |
+
# Event loop
|
162 |
+
self.loop = None
|
163 |
+
|
164 |
+
def add_packet_handler(self, handler: Callable[[ParsedPacket, str], Optional[bytes]]):
|
165 |
+
"""Add packet handler function"""
|
166 |
+
self.packet_handlers.append(handler)
|
167 |
+
|
168 |
+
def remove_packet_handler(self, handler: Callable[[ParsedPacket, str], Optional[bytes]]):
|
169 |
+
"""Remove packet handler function"""
|
170 |
+
if handler in self.packet_handlers:
|
171 |
+
self.packet_handlers.remove(handler)
|
172 |
+
|
173 |
+
def _generate_client_id(self, remote_address: str, remote_port: int) -> str:
|
174 |
+
"""Generate unique client ID"""
|
175 |
+
timestamp = int(time.time() * 1000)
|
176 |
+
return f"client_{remote_address}_{remote_port}_{timestamp}"
|
177 |
+
|
178 |
+
def _process_ethernet_frame(self, frame_data: bytes, client_id: str) -> Optional[bytes]:
|
179 |
+
"""Process Ethernet frame and extract IP packet"""
|
180 |
+
try:
|
181 |
+
# Parse Ethernet frame
|
182 |
+
frame = EthernetFrame.parse(frame_data)
|
183 |
+
if not frame or not frame.is_ipv4():
|
184 |
+
return None
|
185 |
+
|
186 |
+
# Parse IP packet
|
187 |
+
packet = IPParser.parse_packet(frame.payload)
|
188 |
+
self.stats['packets_processed'] += 1
|
189 |
+
self.stats['bytes_processed'] += len(frame_data)
|
190 |
+
|
191 |
+
# Process through packet handlers
|
192 |
+
response_packet = None
|
193 |
+
for handler in self.packet_handlers:
|
194 |
+
try:
|
195 |
+
response = handler(packet, client_id)
|
196 |
+
if response:
|
197 |
+
response_packet = response
|
198 |
+
break
|
199 |
+
except Exception as e:
|
200 |
+
logging.error(f"Packet handler error: {e}")
|
201 |
+
|
202 |
+
if response_packet:
|
203 |
+
# Wrap response in Ethernet frame
|
204 |
+
response_frame = EthernetFrame()
|
205 |
+
response_frame.dest_mac = frame.src_mac
|
206 |
+
response_frame.src_mac = frame.dest_mac
|
207 |
+
response_frame.ethertype = 0x0800
|
208 |
+
response_frame.payload = response_packet
|
209 |
+
|
210 |
+
self.stats['packets_forwarded'] += 1
|
211 |
+
return response_frame.build()
|
212 |
+
else:
|
213 |
+
self.stats['packets_dropped'] += 1
|
214 |
+
return None
|
215 |
+
|
216 |
+
except Exception as e:
|
217 |
+
logging.error(f"Error processing Ethernet frame: {e}")
|
218 |
+
self.stats['packets_dropped'] += 1
|
219 |
+
return None
|
220 |
+
|
221 |
+
async def _handle_websocket_client(self, websocket, path):
|
222 |
+
"""Handle WebSocket client connection"""
|
223 |
+
client_address = websocket.remote_address
|
224 |
+
client_id = self._generate_client_id(client_address[0], client_address[1])
|
225 |
+
|
226 |
+
# Create client connection
|
227 |
+
client = ClientConnection(
|
228 |
+
client_id=client_id,
|
229 |
+
bridge_type=BridgeType.WEBSOCKET,
|
230 |
+
remote_address=client_address[0],
|
231 |
+
remote_port=client_address[1],
|
232 |
+
websocket=websocket
|
233 |
+
)
|
234 |
+
|
235 |
+
with self.lock:
|
236 |
+
if len(self.clients) >= self.max_clients:
|
237 |
+
await websocket.close(code=1013, reason="Too many clients")
|
238 |
+
return
|
239 |
+
|
240 |
+
self.clients[client_id] = client
|
241 |
+
|
242 |
+
self.stats['total_clients'] += 1
|
243 |
+
self.stats['active_clients'] = len(self.clients)
|
244 |
+
self.stats['websocket_connections'] += 1
|
245 |
+
|
246 |
+
logging.info(f"WebSocket client connected: {client_id} from {client_address}")
|
247 |
+
|
248 |
+
try:
|
249 |
+
async for message in websocket:
|
250 |
+
if isinstance(message, bytes):
|
251 |
+
# Binary message - treat as Ethernet frame
|
252 |
+
client.update_activity(1, len(message), 'received')
|
253 |
+
|
254 |
+
response = self._process_ethernet_frame(message, client_id)
|
255 |
+
if response:
|
256 |
+
await websocket.send(response)
|
257 |
+
client.update_activity(1, len(response), 'sent')
|
258 |
+
|
259 |
+
elif isinstance(message, str):
|
260 |
+
# Text message - treat as control message
|
261 |
+
try:
|
262 |
+
control_msg = json.loads(message)
|
263 |
+
await self._handle_control_message(client, control_msg)
|
264 |
+
except json.JSONDecodeError:
|
265 |
+
logging.warning(f"Invalid control message from {client_id}: {message}")
|
266 |
+
|
267 |
+
except websockets.exceptions.ConnectionClosed:
|
268 |
+
logging.info(f"WebSocket client disconnected: {client_id}")
|
269 |
+
except Exception as e:
|
270 |
+
logging.error(f"WebSocket client error: {e}")
|
271 |
+
self.stats['connection_errors'] += 1
|
272 |
+
|
273 |
+
finally:
|
274 |
+
# Clean up client
|
275 |
+
with self.lock:
|
276 |
+
if client_id in self.clients:
|
277 |
+
self.clients[client_id].is_active = False
|
278 |
+
del self.clients[client_id]
|
279 |
+
|
280 |
+
self.stats['active_clients'] = len(self.clients)
|
281 |
+
|
282 |
+
async def _handle_control_message(self, client: ClientConnection, message: Dict):
|
283 |
+
"""Handle control message from client"""
|
284 |
+
msg_type = message.get('type')
|
285 |
+
|
286 |
+
if msg_type == 'ping':
|
287 |
+
# Respond to ping
|
288 |
+
response = {'type': 'pong', 'timestamp': time.time()}
|
289 |
+
await client.websocket.send(json.dumps(response))
|
290 |
+
|
291 |
+
elif msg_type == 'stats':
|
292 |
+
# Send client statistics
|
293 |
+
response = {
|
294 |
+
'type': 'stats',
|
295 |
+
'client_stats': client.to_dict(),
|
296 |
+
'bridge_stats': self.get_stats()
|
297 |
+
}
|
298 |
+
await client.websocket.send(json.dumps(response))
|
299 |
+
|
300 |
+
elif msg_type == 'config':
|
301 |
+
# Handle configuration updates
|
302 |
+
config_data = message.get('data', {})
|
303 |
+
# Process configuration updates here
|
304 |
+
response = {'type': 'config_ack', 'status': 'ok'}
|
305 |
+
await client.websocket.send(json.dumps(response))
|
306 |
+
|
307 |
+
def _handle_tcp_client(self, client_socket: socket.socket, client_address: Tuple[str, int]):
|
308 |
+
"""Handle TCP client connection"""
|
309 |
+
client_id = self._generate_client_id(client_address[0], client_address[1])
|
310 |
+
|
311 |
+
# Create client connection
|
312 |
+
client = ClientConnection(
|
313 |
+
client_id=client_id,
|
314 |
+
bridge_type=BridgeType.TCP_SOCKET,
|
315 |
+
remote_address=client_address[0],
|
316 |
+
remote_port=client_address[1],
|
317 |
+
socket=client_socket
|
318 |
+
)
|
319 |
+
|
320 |
+
with self.lock:
|
321 |
+
if len(self.clients) >= self.max_clients:
|
322 |
+
client_socket.close()
|
323 |
+
return
|
324 |
+
|
325 |
+
self.clients[client_id] = client
|
326 |
+
|
327 |
+
self.stats['total_clients'] += 1
|
328 |
+
self.stats['active_clients'] = len(self.clients)
|
329 |
+
self.stats['tcp_connections'] += 1
|
330 |
+
|
331 |
+
logging.info(f"TCP client connected: {client_id} from {client_address}")
|
332 |
+
|
333 |
+
try:
|
334 |
+
client_socket.settimeout(self.client_timeout)
|
335 |
+
|
336 |
+
while client.is_active:
|
337 |
+
try:
|
338 |
+
# Read frame length (4 bytes)
|
339 |
+
length_data = client_socket.recv(4)
|
340 |
+
if not length_data:
|
341 |
+
break
|
342 |
+
|
343 |
+
frame_length = struct.unpack('!I', length_data)[0]
|
344 |
+
if frame_length > 65536: # Sanity check
|
345 |
+
break
|
346 |
+
|
347 |
+
# Read frame data
|
348 |
+
frame_data = b''
|
349 |
+
while len(frame_data) < frame_length:
|
350 |
+
chunk = client_socket.recv(frame_length - len(frame_data))
|
351 |
+
if not chunk:
|
352 |
+
break
|
353 |
+
frame_data += chunk
|
354 |
+
|
355 |
+
if len(frame_data) != frame_length:
|
356 |
+
break
|
357 |
+
|
358 |
+
client.update_activity(1, len(frame_data), 'received')
|
359 |
+
|
360 |
+
# Process frame
|
361 |
+
response = self._process_ethernet_frame(frame_data, client_id)
|
362 |
+
if response:
|
363 |
+
# Send response with length prefix
|
364 |
+
response_length = struct.pack('!I', len(response))
|
365 |
+
client_socket.send(response_length + response)
|
366 |
+
client.update_activity(1, len(response), 'sent')
|
367 |
+
|
368 |
+
except socket.timeout:
|
369 |
+
continue
|
370 |
+
except Exception as e:
|
371 |
+
logging.error(f"TCP client error: {e}")
|
372 |
+
break
|
373 |
+
|
374 |
+
except Exception as e:
|
375 |
+
logging.error(f"TCP client handler error: {e}")
|
376 |
+
self.stats['connection_errors'] += 1
|
377 |
+
|
378 |
+
finally:
|
379 |
+
# Clean up client
|
380 |
+
try:
|
381 |
+
client_socket.close()
|
382 |
+
except:
|
383 |
+
pass
|
384 |
+
|
385 |
+
with self.lock:
|
386 |
+
if client_id in self.clients:
|
387 |
+
self.clients[client_id].is_active = False
|
388 |
+
del self.clients[client_id]
|
389 |
+
|
390 |
+
self.stats['active_clients'] = len(self.clients)
|
391 |
+
logging.info(f"TCP client disconnected: {client_id}")
|
392 |
+
|
393 |
+
def _tcp_server_loop(self):
|
394 |
+
"""TCP server loop"""
|
395 |
+
try:
|
396 |
+
self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
397 |
+
self.tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
398 |
+
self.tcp_server_socket.bind((self.tcp_host, self.tcp_port))
|
399 |
+
self.tcp_server_socket.listen(10)
|
400 |
+
|
401 |
+
logging.info(f"TCP bridge server listening on {self.tcp_host}:{self.tcp_port}")
|
402 |
+
|
403 |
+
while self.running:
|
404 |
+
try:
|
405 |
+
client_socket, client_address = self.tcp_server_socket.accept()
|
406 |
+
|
407 |
+
# Handle client in separate thread
|
408 |
+
client_thread = threading.Thread(
|
409 |
+
target=self._handle_tcp_client,
|
410 |
+
args=(client_socket, client_address),
|
411 |
+
daemon=True
|
412 |
+
)
|
413 |
+
client_thread.start()
|
414 |
+
|
415 |
+
except socket.error as e:
|
416 |
+
if self.running:
|
417 |
+
logging.error(f"TCP server error: {e}")
|
418 |
+
time.sleep(1)
|
419 |
+
|
420 |
+
except Exception as e:
|
421 |
+
logging.error(f"TCP server loop error: {e}")
|
422 |
+
|
423 |
+
finally:
|
424 |
+
if self.tcp_server_socket:
|
425 |
+
self.tcp_server_socket.close()
|
426 |
+
|
427 |
+
def _cleanup_loop(self):
|
428 |
+
"""Background cleanup loop"""
|
429 |
+
while self.running:
|
430 |
+
try:
|
431 |
+
current_time = time.time()
|
432 |
+
expired_clients = []
|
433 |
+
|
434 |
+
with self.lock:
|
435 |
+
for client_id, client in self.clients.items():
|
436 |
+
# Mark inactive clients for removal
|
437 |
+
if current_time - client.last_activity > self.client_timeout:
|
438 |
+
expired_clients.append(client_id)
|
439 |
+
|
440 |
+
# Clean up expired clients
|
441 |
+
for client_id in expired_clients:
|
442 |
+
with self.lock:
|
443 |
+
if client_id in self.clients:
|
444 |
+
client = self.clients[client_id]
|
445 |
+
client.is_active = False
|
446 |
+
|
447 |
+
# Close connections
|
448 |
+
if client.websocket:
|
449 |
+
try:
|
450 |
+
asyncio.run_coroutine_threadsafe(
|
451 |
+
client.websocket.close(),
|
452 |
+
self.loop
|
453 |
+
)
|
454 |
+
except:
|
455 |
+
pass
|
456 |
+
|
457 |
+
if client.socket:
|
458 |
+
try:
|
459 |
+
client.socket.close()
|
460 |
+
except:
|
461 |
+
pass
|
462 |
+
|
463 |
+
del self.clients[client_id]
|
464 |
+
logging.info(f"Cleaned up expired client: {client_id}")
|
465 |
+
|
466 |
+
self.stats['active_clients'] = len(self.clients)
|
467 |
+
|
468 |
+
time.sleep(30) # Cleanup every 30 seconds
|
469 |
+
|
470 |
+
except Exception as e:
|
471 |
+
logging.error(f"Cleanup loop error: {e}")
|
472 |
+
time.sleep(5)
|
473 |
+
|
474 |
+
def send_packet_to_client(self, client_id: str, packet_data: bytes) -> bool:
|
475 |
+
"""Send packet to specific client"""
|
476 |
+
with self.lock:
|
477 |
+
client = self.clients.get(client_id)
|
478 |
+
|
479 |
+
if not client or not client.is_active:
|
480 |
+
return False
|
481 |
+
|
482 |
+
try:
|
483 |
+
if client.bridge_type == BridgeType.WEBSOCKET:
|
484 |
+
# Send via WebSocket
|
485 |
+
if client.websocket:
|
486 |
+
asyncio.run_coroutine_threadsafe(
|
487 |
+
client.websocket.send(packet_data),
|
488 |
+
self.loop
|
489 |
+
)
|
490 |
+
client.update_activity(1, len(packet_data), 'sent')
|
491 |
+
return True
|
492 |
+
|
493 |
+
elif client.bridge_type == BridgeType.TCP_SOCKET:
|
494 |
+
# Send via TCP socket with length prefix
|
495 |
+
if client.socket:
|
496 |
+
length_prefix = struct.pack('!I', len(packet_data))
|
497 |
+
client.socket.send(length_prefix + packet_data)
|
498 |
+
client.update_activity(1, len(packet_data), 'sent')
|
499 |
+
return True
|
500 |
+
|
501 |
+
except Exception as e:
|
502 |
+
logging.error(f"Failed to send packet to client {client_id}: {e}")
|
503 |
+
# Mark client as inactive
|
504 |
+
client.is_active = False
|
505 |
+
|
506 |
+
return False
|
507 |
+
|
508 |
+
def broadcast_packet(self, packet_data: bytes, exclude_client: Optional[str] = None) -> int:
|
509 |
+
"""Broadcast packet to all clients"""
|
510 |
+
sent_count = 0
|
511 |
+
|
512 |
+
with self.lock:
|
513 |
+
client_ids = list(self.clients.keys())
|
514 |
+
|
515 |
+
for client_id in client_ids:
|
516 |
+
if client_id != exclude_client:
|
517 |
+
if self.send_packet_to_client(client_id, packet_data):
|
518 |
+
sent_count += 1
|
519 |
+
|
520 |
+
return sent_count
|
521 |
+
|
522 |
+
def get_clients(self) -> Dict[str, Dict]:
|
523 |
+
"""Get all connected clients"""
|
524 |
+
with self.lock:
|
525 |
+
return {
|
526 |
+
client_id: client.to_dict()
|
527 |
+
for client_id, client in self.clients.items()
|
528 |
+
}
|
529 |
+
|
530 |
+
def get_client(self, client_id: str) -> Optional[Dict]:
|
531 |
+
"""Get specific client"""
|
532 |
+
with self.lock:
|
533 |
+
client = self.clients.get(client_id)
|
534 |
+
return client.to_dict() if client else None
|
535 |
+
|
536 |
+
def disconnect_client(self, client_id: str) -> bool:
|
537 |
+
"""Disconnect specific client"""
|
538 |
+
with self.lock:
|
539 |
+
client = self.clients.get(client_id)
|
540 |
+
if not client:
|
541 |
+
return False
|
542 |
+
|
543 |
+
client.is_active = False
|
544 |
+
|
545 |
+
# Close connection
|
546 |
+
if client.websocket:
|
547 |
+
try:
|
548 |
+
asyncio.run_coroutine_threadsafe(
|
549 |
+
client.websocket.close(),
|
550 |
+
self.loop
|
551 |
+
)
|
552 |
+
except Exception:
|
553 |
+
pass
|
554 |
+
|
555 |
+
if client.socket:
|
556 |
+
try:
|
557 |
+
client.socket.close()
|
558 |
+
except Exception:
|
559 |
+
pass
|
560 |
+
|
561 |
+
del self.clients[client_id]
|
562 |
+
self.stats['active_clients'] = len(self.clients)
|
563 |
+
|
564 |
+
return True
|
565 |
+
|
566 |
+
def get_stats(self) -> Dict:
|
567 |
+
"""Get bridge statistics"""
|
568 |
+
with self.lock:
|
569 |
+
stats = self.stats.copy()
|
570 |
+
stats['active_clients'] = len(self.clients)
|
571 |
+
|
572 |
+
return stats
|
573 |
+
|
574 |
+
def reset_stats(self):
|
575 |
+
"""Reset bridge statistics"""
|
576 |
+
self.stats = {
|
577 |
+
'total_clients': 0,
|
578 |
+
'active_clients': len(self.clients),
|
579 |
+
'packets_processed': 0,
|
580 |
+
'packets_forwarded': 0,
|
581 |
+
'packets_dropped': 0,
|
582 |
+
'bytes_processed': 0,
|
583 |
+
'websocket_connections': 0,
|
584 |
+
'tcp_connections': 0,
|
585 |
+
'connection_errors': 0
|
586 |
+
}
|
587 |
+
|
588 |
+
async def start_websocket_server(self):
|
589 |
+
"""Start WebSocket server"""
|
590 |
+
try:
|
591 |
+
self.websocket_server = await websockets.serve(
|
592 |
+
self._handle_websocket_client,
|
593 |
+
self.websocket_host,
|
594 |
+
self.websocket_port,
|
595 |
+
max_size=1024*1024, # 1MB max message size
|
596 |
+
ping_interval=30,
|
597 |
+
ping_timeout=10
|
598 |
+
)
|
599 |
+
|
600 |
+
logging.info(f"WebSocket bridge server started on {self.websocket_host}:{self.websocket_port}")
|
601 |
+
|
602 |
+
# Keep server running
|
603 |
+
await self.websocket_server.wait_closed()
|
604 |
+
|
605 |
+
except Exception as e:
|
606 |
+
logging.error(f"WebSocket server error: {e}")
|
607 |
+
|
608 |
+
def start(self):
|
609 |
+
"""Start packet bridge"""
|
610 |
+
self.running = True
|
611 |
+
|
612 |
+
# Start event loop
|
613 |
+
self.loop = asyncio.new_event_loop()
|
614 |
+
asyncio.set_event_loop(self.loop)
|
615 |
+
|
616 |
+
# Start WebSocket server in a separate thread
|
617 |
+
websocket_thread = threading.Thread(target=self._run_websocket_server_in_thread, daemon=True)
|
618 |
+
websocket_thread.start()
|
619 |
+
|
620 |
+
# Start TCP server in separate thread
|
621 |
+
tcp_thread = threading.Thread(target=self._tcp_server_loop, daemon=True)
|
622 |
+
tcp_thread.start()
|
623 |
+
|
624 |
+
# Start cleanup thread
|
625 |
+
cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
|
626 |
+
cleanup_thread.start()
|
627 |
+
|
628 |
+
logging.info("Packet bridge started")
|
629 |
+
|
630 |
+
|
631 |
+
|
632 |
+
def stop(self):
|
633 |
+
"""Stop packet bridge"""
|
634 |
+
self.running = False
|
635 |
+
|
636 |
+
# Close WebSocket server
|
637 |
+
if self.websocket_server:
|
638 |
+
self.websocket_server.close()
|
639 |
+
|
640 |
+
# Close TCP server
|
641 |
+
if self.tcp_server_socket:
|
642 |
+
self.tcp_server_socket.close()
|
643 |
+
|
644 |
+
# Disconnect all clients
|
645 |
+
with self.lock:
|
646 |
+
client_ids = list(self.clients.keys())
|
647 |
+
|
648 |
+
for client_id in client_ids:
|
649 |
+
self.disconnect_client(client_id)
|
650 |
+
|
651 |
+
# Stop event loop
|
652 |
+
if self.loop and not self.loop.is_closed():
|
653 |
+
self.loop.call_soon_threadsafe(self.loop.stop)
|
654 |
+
|
655 |
+
logging.info("Packet bridge stopped")
|
656 |
+
|
657 |
+
|
658 |
+
|
659 |
+
def _run_websocket_server_in_thread(self):
|
660 |
+
"""Run the WebSocket server in a separate thread with its own event loop."""
|
661 |
+
asyncio.set_event_loop(self.loop)
|
662 |
+
self.loop.run_until_complete(self.start_websocket_server())
|
663 |
+
|
664 |
+
|
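A minimal usage sketch for the bridge above (illustrative only; it assumes PacketBridge is constructed with a config dict carrying websocket_host/websocket_port keys — those key names are an assumption, since the constructor sits earlier in this file — and it only calls methods visible in this diff):

    import time
    from core.packet_bridge import PacketBridge

    config = {'websocket_host': '0.0.0.0', 'websocket_port': 8765}  # assumed config keys
    bridge = PacketBridge(config)
    bridge.start()                                  # spawns WebSocket, TCP and cleanup threads
    try:
        time.sleep(1)                               # let the servers come up
        print(bridge.get_clients())                 # {} until a client connects
        sent = bridge.broadcast_packet(b'ping')     # number of clients the frame reached
        print(sent, bridge.get_stats())
    finally:
        bridge.stop()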
core/session_tracker.py
ADDED
@@ -0,0 +1,602 @@
1 |
+
"""
|
2 |
+
Session Tracker Module
|
3 |
+
|
4 |
+
Manages and tracks all network sessions across the virtual ISP stack:
|
5 |
+
- Unified session management across all modules
|
6 |
+
- Session lifecycle tracking
|
7 |
+
- Performance metrics and analytics
|
8 |
+
- Session correlation and debugging
|
9 |
+
"""
|
10 |
+
|
11 |
+
import time
|
12 |
+
import threading
|
13 |
+
import uuid
|
14 |
+
from typing import Dict, List, Optional, Set, Any, Tuple
|
15 |
+
from dataclasses import dataclass, field
|
16 |
+
from enum import Enum
|
17 |
+
import json
|
18 |
+
|
19 |
+
from .dhcp_server import DHCPLease
|
20 |
+
from .nat_engine import NATSession
|
21 |
+
from .tcp_engine import TCPConnection
|
22 |
+
from .socket_translator import SocketConnection
|
23 |
+
|
24 |
+
|
25 |
+
class SessionType(Enum):
|
26 |
+
DHCP_LEASE = "DHCP_LEASE"
|
27 |
+
NAT_SESSION = "NAT_SESSION"
|
28 |
+
TCP_CONNECTION = "TCP_CONNECTION"
|
29 |
+
SOCKET_CONNECTION = "SOCKET_CONNECTION"
|
30 |
+
BRIDGE_CLIENT = "BRIDGE_CLIENT"
|
31 |
+
|
32 |
+
|
33 |
+
class SessionState(Enum):
|
34 |
+
INITIALIZING = "INITIALIZING"
|
35 |
+
ACTIVE = "ACTIVE"
|
36 |
+
IDLE = "IDLE"
|
37 |
+
CLOSING = "CLOSING"
|
38 |
+
CLOSED = "CLOSED"
|
39 |
+
ERROR = "ERROR"
|
40 |
+
|
41 |
+
|
42 |
+
@dataclass
|
43 |
+
class SessionMetrics:
|
44 |
+
"""Session performance metrics"""
|
45 |
+
bytes_in: int = 0
|
46 |
+
bytes_out: int = 0
|
47 |
+
packets_in: int = 0
|
48 |
+
packets_out: int = 0
|
49 |
+
errors: int = 0
|
50 |
+
retransmits: int = 0
|
51 |
+
rtt_samples: List[float] = field(default_factory=list)
|
52 |
+
|
53 |
+
@property
|
54 |
+
def total_bytes(self) -> int:
|
55 |
+
return self.bytes_in + self.bytes_out
|
56 |
+
|
57 |
+
@property
|
58 |
+
def total_packets(self) -> int:
|
59 |
+
return self.packets_in + self.packets_out
|
60 |
+
|
61 |
+
@property
|
62 |
+
def average_rtt(self) -> float:
|
63 |
+
return sum(self.rtt_samples) / len(self.rtt_samples) if self.rtt_samples else 0.0
|
64 |
+
|
65 |
+
def update_bytes(self, bytes_in: int = 0, bytes_out: int = 0):
|
66 |
+
"""Update byte counters"""
|
67 |
+
self.bytes_in += bytes_in
|
68 |
+
self.bytes_out += bytes_out
|
69 |
+
|
70 |
+
def update_packets(self, packets_in: int = 0, packets_out: int = 0):
|
71 |
+
"""Update packet counters"""
|
72 |
+
self.packets_in += packets_in
|
73 |
+
self.packets_out += packets_out
|
74 |
+
|
75 |
+
def add_rtt_sample(self, rtt: float):
|
76 |
+
"""Add RTT sample"""
|
77 |
+
self.rtt_samples.append(rtt)
|
78 |
+
# Keep only last 100 samples
|
79 |
+
if len(self.rtt_samples) > 100:
|
80 |
+
self.rtt_samples = self.rtt_samples[-100:]
|
81 |
+
|
82 |
+
def to_dict(self) -> Dict:
|
83 |
+
"""Convert to dictionary"""
|
84 |
+
return {
|
85 |
+
'bytes_in': self.bytes_in,
|
86 |
+
'bytes_out': self.bytes_out,
|
87 |
+
'packets_in': self.packets_in,
|
88 |
+
'packets_out': self.packets_out,
|
89 |
+
'total_bytes': self.total_bytes,
|
90 |
+
'total_packets': self.total_packets,
|
91 |
+
'errors': self.errors,
|
92 |
+
'retransmits': self.retransmits,
|
93 |
+
'average_rtt': self.average_rtt,
|
94 |
+
'rtt_samples_count': len(self.rtt_samples)
|
95 |
+
}
|
96 |
+
|
97 |
+
|
98 |
+
@dataclass
|
99 |
+
class UnifiedSession:
|
100 |
+
"""Unified session representation"""
|
101 |
+
session_id: str
|
102 |
+
session_type: SessionType
|
103 |
+
state: SessionState
|
104 |
+
created_time: float
|
105 |
+
last_activity: float
|
106 |
+
|
107 |
+
# Session identifiers
|
108 |
+
virtual_ip: Optional[str] = None
|
109 |
+
virtual_port: Optional[int] = None
|
110 |
+
real_ip: Optional[str] = None
|
111 |
+
real_port: Optional[int] = None
|
112 |
+
protocol: Optional[str] = None
|
113 |
+
|
114 |
+
# Related sessions (for correlation)
|
115 |
+
related_sessions: Set[str] = field(default_factory=set)
|
116 |
+
parent_session: Optional[str] = None
|
117 |
+
child_sessions: Set[str] = field(default_factory=set)
|
118 |
+
|
119 |
+
# Metrics
|
120 |
+
metrics: SessionMetrics = field(default_factory=SessionMetrics)
|
121 |
+
|
122 |
+
# Additional data
|
123 |
+
metadata: Dict[str, Any] = field(default_factory=dict)
|
124 |
+
|
125 |
+
def __post_init__(self):
|
126 |
+
if not self.session_id:
|
127 |
+
self.session_id = str(uuid.uuid4())
|
128 |
+
if self.created_time == 0:
|
129 |
+
self.created_time = time.time()
|
130 |
+
if self.last_activity == 0:
|
131 |
+
self.last_activity = time.time()
|
132 |
+
|
133 |
+
def update_activity(self):
|
134 |
+
"""Update last activity timestamp"""
|
135 |
+
self.last_activity = time.time()
|
136 |
+
|
137 |
+
def add_related_session(self, session_id: str):
|
138 |
+
"""Add related session"""
|
139 |
+
self.related_sessions.add(session_id)
|
140 |
+
|
141 |
+
def add_child_session(self, session_id: str):
|
142 |
+
"""Add child session"""
|
143 |
+
self.child_sessions.add(session_id)
|
144 |
+
|
145 |
+
def set_parent_session(self, session_id: str):
|
146 |
+
"""Set parent session"""
|
147 |
+
self.parent_session = session_id
|
148 |
+
|
149 |
+
@property
|
150 |
+
def duration(self) -> float:
|
151 |
+
"""Get session duration in seconds"""
|
152 |
+
return time.time() - self.created_time
|
153 |
+
|
154 |
+
@property
|
155 |
+
def idle_time(self) -> float:
|
156 |
+
"""Get idle time in seconds"""
|
157 |
+
return time.time() - self.last_activity
|
158 |
+
|
159 |
+
def to_dict(self) -> Dict:
|
160 |
+
"""Convert to dictionary"""
|
161 |
+
return {
|
162 |
+
'session_id': self.session_id,
|
163 |
+
'session_type': self.session_type.value,
|
164 |
+
'state': self.state.value,
|
165 |
+
'created_time': self.created_time,
|
166 |
+
'last_activity': self.last_activity,
|
167 |
+
'duration': self.duration,
|
168 |
+
'idle_time': self.idle_time,
|
169 |
+
'virtual_ip': self.virtual_ip,
|
170 |
+
'virtual_port': self.virtual_port,
|
171 |
+
'real_ip': self.real_ip,
|
172 |
+
'real_port': self.real_port,
|
173 |
+
'protocol': self.protocol,
|
174 |
+
'related_sessions': list(self.related_sessions),
|
175 |
+
'parent_session': self.parent_session,
|
176 |
+
'child_sessions': list(self.child_sessions),
|
177 |
+
'metrics': self.metrics.to_dict(),
|
178 |
+
'metadata': self.metadata
|
179 |
+
}
|
180 |
+
|
181 |
+
|
182 |
+
class SessionTracker:
|
183 |
+
"""Unified session tracker"""
|
184 |
+
|
185 |
+
def __init__(self, config: Dict):
|
186 |
+
self.config = config
|
187 |
+
self.sessions: Dict[str, UnifiedSession] = {}
|
188 |
+
self.session_index: Dict[Tuple[str, str], Set[str]] = {} # (type, key) -> session_ids
|
189 |
+
self.lock = threading.RLock()  # re-entrant: _cleanup_loop and create_session call remove_session while holding the lock
|
190 |
+
|
191 |
+
# Configuration
|
192 |
+
self.max_sessions = config.get('max_sessions', 10000)
|
193 |
+
self.session_timeout = config.get('session_timeout', 3600)
|
194 |
+
self.cleanup_interval = config.get('cleanup_interval', 300)
|
195 |
+
self.metrics_retention = config.get('metrics_retention', 86400) # 24 hours
|
196 |
+
|
197 |
+
# Statistics
|
198 |
+
self.stats = {
|
199 |
+
'total_sessions': 0,
|
200 |
+
'active_sessions': 0,
|
201 |
+
'expired_sessions': 0,
|
202 |
+
'session_types': {t.value: 0 for t in SessionType},
|
203 |
+
'session_states': {s.value: 0 for s in SessionState},
|
204 |
+
'cleanup_runs': 0,
|
205 |
+
'correlations_created': 0
|
206 |
+
}
|
207 |
+
|
208 |
+
# Background tasks
|
209 |
+
self.running = False
|
210 |
+
self.cleanup_thread = None
|
211 |
+
|
212 |
+
def _generate_session_key(self, session_type: SessionType, **kwargs) -> str:
|
213 |
+
"""Generate session key for indexing"""
|
214 |
+
if session_type == SessionType.DHCP_LEASE:
|
215 |
+
return f"dhcp_{kwargs.get('mac_address', 'unknown')}"
|
216 |
+
elif session_type == SessionType.NAT_SESSION:
|
217 |
+
return f"nat_{kwargs.get('virtual_ip', '')}_{kwargs.get('virtual_port', 0)}_{kwargs.get('protocol', '')}"
|
218 |
+
elif session_type == SessionType.TCP_CONNECTION:
|
219 |
+
return f"tcp_{kwargs.get('local_ip', '')}_{kwargs.get('local_port', 0)}_{kwargs.get('remote_ip', '')}_{kwargs.get('remote_port', 0)}"
|
220 |
+
elif session_type == SessionType.SOCKET_CONNECTION:
|
221 |
+
return f"socket_{kwargs.get('connection_id', 'unknown')}"
|
222 |
+
elif session_type == SessionType.BRIDGE_CLIENT:
|
223 |
+
return f"bridge_{kwargs.get('client_id', 'unknown')}"
|
224 |
+
else:
|
225 |
+
return f"unknown_{time.time()}"
|
226 |
+
|
227 |
+
def _add_to_index(self, session: UnifiedSession):
|
228 |
+
"""Add session to search index"""
|
229 |
+
# Index by type
|
230 |
+
type_key = (session.session_type.value, 'all')
|
231 |
+
if type_key not in self.session_index:
|
232 |
+
self.session_index[type_key] = set()
|
233 |
+
self.session_index[type_key].add(session.session_id)
|
234 |
+
|
235 |
+
# Index by IP addresses
|
236 |
+
if session.virtual_ip:
|
237 |
+
ip_key = ('virtual_ip', session.virtual_ip)
|
238 |
+
if ip_key not in self.session_index:
|
239 |
+
self.session_index[ip_key] = set()
|
240 |
+
self.session_index[ip_key].add(session.session_id)
|
241 |
+
|
242 |
+
if session.real_ip:
|
243 |
+
ip_key = ('real_ip', session.real_ip)
|
244 |
+
if ip_key not in self.session_index:
|
245 |
+
self.session_index[ip_key] = set()
|
246 |
+
self.session_index[ip_key].add(session.session_id)
|
247 |
+
|
248 |
+
# Index by protocol
|
249 |
+
if session.protocol:
|
250 |
+
proto_key = ('protocol', session.protocol)
|
251 |
+
if proto_key not in self.session_index:
|
252 |
+
self.session_index[proto_key] = set()
|
253 |
+
self.session_index[proto_key].add(session.session_id)
|
254 |
+
|
255 |
+
def _remove_from_index(self, session: UnifiedSession):
|
256 |
+
"""Remove session from search index"""
|
257 |
+
for key, session_set in self.session_index.items():
|
258 |
+
session_set.discard(session.session_id)
|
259 |
+
|
260 |
+
def create_session(self, session_type: SessionType, **kwargs) -> str:
|
261 |
+
"""Create new session"""
|
262 |
+
with self.lock:
|
263 |
+
# Check session limit
|
264 |
+
if len(self.sessions) >= self.max_sessions:
|
265 |
+
# Remove oldest expired session
|
266 |
+
self._cleanup_expired_sessions()
|
267 |
+
if len(self.sessions) >= self.max_sessions:
|
268 |
+
return None
|
269 |
+
|
270 |
+
# Create session
|
271 |
+
session = UnifiedSession(
|
272 |
+
session_id=kwargs.get('session_id', str(uuid.uuid4())),
|
273 |
+
session_type=session_type,
|
274 |
+
state=SessionState.INITIALIZING,
|
275 |
+
virtual_ip=kwargs.get('virtual_ip'),
|
276 |
+
virtual_port=kwargs.get('virtual_port'),
|
277 |
+
real_ip=kwargs.get('real_ip'),
|
278 |
+
real_port=kwargs.get('real_port'),
|
279 |
+
protocol=kwargs.get('protocol'),
|
280 |
+
metadata=kwargs.get('metadata', {})
|
281 |
+
)
|
282 |
+
|
283 |
+
# Add to sessions
|
284 |
+
self.sessions[session.session_id] = session
|
285 |
+
self._add_to_index(session)
|
286 |
+
|
287 |
+
# Update statistics
|
288 |
+
self.stats['total_sessions'] += 1
|
289 |
+
self.stats['active_sessions'] = len(self.sessions)
|
290 |
+
self.stats['session_types'][session_type.value] += 1
|
291 |
+
self.stats['session_states'][SessionState.INITIALIZING.value] += 1
|
292 |
+
|
293 |
+
return session.session_id
|
294 |
+
|
295 |
+
def update_session(self, session_id: str, **kwargs) -> bool:
|
296 |
+
"""Update session"""
|
297 |
+
with self.lock:
|
298 |
+
session = self.sessions.get(session_id)
|
299 |
+
if not session:
|
300 |
+
return False
|
301 |
+
|
302 |
+
# Update fields
|
303 |
+
old_state = session.state
|
304 |
+
|
305 |
+
for key, value in kwargs.items():
|
306 |
+
if hasattr(session, key):
|
307 |
+
setattr(session, key, value)
|
308 |
+
|
309 |
+
session.update_activity()
|
310 |
+
|
311 |
+
# Update state statistics
|
312 |
+
if 'state' in kwargs and kwargs['state'] != old_state:
|
313 |
+
self.stats['session_states'][old_state.value] -= 1
|
314 |
+
self.stats['session_states'][kwargs['state'].value] += 1
|
315 |
+
|
316 |
+
return True
|
317 |
+
|
318 |
+
def close_session(self, session_id: str, reason: str = "") -> bool:
|
319 |
+
"""Close session"""
|
320 |
+
with self.lock:
|
321 |
+
session = self.sessions.get(session_id)
|
322 |
+
if not session:
|
323 |
+
return False
|
324 |
+
|
325 |
+
old_state = session.state
|
326 |
+
session.state = SessionState.CLOSED
|
327 |
+
session.update_activity()
|
328 |
+
|
329 |
+
if reason:
|
330 |
+
session.metadata['close_reason'] = reason
|
331 |
+
|
332 |
+
# Update statistics
|
333 |
+
self.stats['session_states'][old_state.value] -= 1
|
334 |
+
self.stats['session_states'][SessionState.CLOSED.value] += 1
|
335 |
+
|
336 |
+
return True
|
337 |
+
|
338 |
+
def remove_session(self, session_id: str) -> bool:
|
339 |
+
"""Remove session completely"""
|
340 |
+
with self.lock:
|
341 |
+
session = self.sessions.get(session_id)
|
342 |
+
if not session:
|
343 |
+
return False
|
344 |
+
|
345 |
+
# Remove from index
|
346 |
+
self._remove_from_index(session)
|
347 |
+
|
348 |
+
# Remove from sessions
|
349 |
+
del self.sessions[session_id]
|
350 |
+
|
351 |
+
# Update statistics
|
352 |
+
self.stats['active_sessions'] = len(self.sessions)
|
353 |
+
self.stats['session_types'][session.session_type.value] -= 1
|
354 |
+
self.stats['session_states'][session.state.value] -= 1
|
355 |
+
|
356 |
+
return True
|
357 |
+
|
358 |
+
def get_session(self, session_id: str) -> Optional[UnifiedSession]:
|
359 |
+
"""Get session by ID"""
|
360 |
+
with self.lock:
|
361 |
+
return self.sessions.get(session_id)
|
362 |
+
|
363 |
+
def find_sessions(self, **criteria) -> List[UnifiedSession]:
|
364 |
+
"""Find sessions by criteria"""
|
365 |
+
with self.lock:
|
366 |
+
matching_sessions = []
|
367 |
+
|
368 |
+
# Use index if possible
|
369 |
+
if 'session_type' in criteria:
|
370 |
+
type_key = (criteria['session_type'].value if isinstance(criteria['session_type'], SessionType) else criteria['session_type'], 'all')
|
371 |
+
candidate_ids = self.session_index.get(type_key, set())
|
372 |
+
elif 'virtual_ip' in criteria:
|
373 |
+
ip_key = ('virtual_ip', criteria['virtual_ip'])
|
374 |
+
candidate_ids = self.session_index.get(ip_key, set())
|
375 |
+
elif 'real_ip' in criteria:
|
376 |
+
ip_key = ('real_ip', criteria['real_ip'])
|
377 |
+
candidate_ids = self.session_index.get(ip_key, set())
|
378 |
+
elif 'protocol' in criteria:
|
379 |
+
proto_key = ('protocol', criteria['protocol'])
|
380 |
+
candidate_ids = self.session_index.get(proto_key, set())
|
381 |
+
else:
|
382 |
+
candidate_ids = set(self.sessions.keys())
|
383 |
+
|
384 |
+
# Filter candidates
|
385 |
+
for session_id in candidate_ids:
|
386 |
+
session = self.sessions.get(session_id)
|
387 |
+
if not session:
|
388 |
+
continue
|
389 |
+
|
390 |
+
match = True
|
391 |
+
for key, value in criteria.items():
|
392 |
+
if hasattr(session, key):
|
393 |
+
session_value = getattr(session, key)
|
394 |
+
if isinstance(value, (SessionType, SessionState)):
|
395 |
+
if session_value != value:
|
396 |
+
match = False
|
397 |
+
break
|
398 |
+
elif session_value != value:
|
399 |
+
match = False
|
400 |
+
break
|
401 |
+
else:
|
402 |
+
match = False
|
403 |
+
break
|
404 |
+
|
405 |
+
if match:
|
406 |
+
matching_sessions.append(session)
|
407 |
+
|
408 |
+
return matching_sessions
|
409 |
+
|
410 |
+
def correlate_sessions(self, session_id1: str, session_id2: str, relationship: str = 'related') -> bool:
|
411 |
+
"""Create correlation between sessions"""
|
412 |
+
with self.lock:
|
413 |
+
session1 = self.sessions.get(session_id1)
|
414 |
+
session2 = self.sessions.get(session_id2)
|
415 |
+
|
416 |
+
if not session1 or not session2:
|
417 |
+
return False
|
418 |
+
|
419 |
+
if relationship == 'parent_child':
|
420 |
+
session1.add_child_session(session_id2)
|
421 |
+
session2.set_parent_session(session_id1)
|
422 |
+
else:
|
423 |
+
session1.add_related_session(session_id2)
|
424 |
+
session2.add_related_session(session_id1)
|
425 |
+
|
426 |
+
self.stats['correlations_created'] += 1
|
427 |
+
return True
|
428 |
+
|
429 |
+
def update_metrics(self, session_id: str, **metrics) -> bool:
|
430 |
+
"""Update session metrics"""
|
431 |
+
with self.lock:
|
432 |
+
session = self.sessions.get(session_id)
|
433 |
+
if not session:
|
434 |
+
return False
|
435 |
+
|
436 |
+
session.update_activity()
|
437 |
+
|
438 |
+
# Update metrics
|
439 |
+
if 'bytes_in' in metrics or 'bytes_out' in metrics:
|
440 |
+
session.metrics.update_bytes(
|
441 |
+
metrics.get('bytes_in', 0),
|
442 |
+
metrics.get('bytes_out', 0)
|
443 |
+
)
|
444 |
+
|
445 |
+
if 'packets_in' in metrics or 'packets_out' in metrics:
|
446 |
+
session.metrics.update_packets(
|
447 |
+
metrics.get('packets_in', 0),
|
448 |
+
metrics.get('packets_out', 0)
|
449 |
+
)
|
450 |
+
|
451 |
+
if 'rtt' in metrics:
|
452 |
+
session.metrics.add_rtt_sample(metrics['rtt'])
|
453 |
+
|
454 |
+
if 'errors' in metrics:
|
455 |
+
session.metrics.errors += metrics['errors']
|
456 |
+
|
457 |
+
if 'retransmits' in metrics:
|
458 |
+
session.metrics.retransmits += metrics['retransmits']
|
459 |
+
|
460 |
+
return True
|
461 |
+
|
462 |
+
def _cleanup_expired_sessions(self):
|
463 |
+
"""Clean up expired sessions"""
|
464 |
+
current_time = time.time()
|
465 |
+
expired_sessions = []
|
466 |
+
|
467 |
+
for session_id, session in self.sessions.items():
|
468 |
+
# Check if session is expired
|
469 |
+
if (session.state == SessionState.CLOSED and
|
470 |
+
current_time - session.last_activity > self.cleanup_interval):
|
471 |
+
expired_sessions.append(session_id)
|
472 |
+
elif (session.state != SessionState.CLOSED and
|
473 |
+
current_time - session.last_activity > self.session_timeout):
|
474 |
+
expired_sessions.append(session_id)
|
475 |
+
|
476 |
+
# Remove expired sessions
|
477 |
+
for session_id in expired_sessions:
|
478 |
+
self.remove_session(session_id)
|
479 |
+
self.stats['expired_sessions'] += 1
|
480 |
+
|
481 |
+
def _cleanup_loop(self):
|
482 |
+
"""Background cleanup loop"""
|
483 |
+
while self.running:
|
484 |
+
try:
|
485 |
+
with self.lock:
|
486 |
+
self._cleanup_expired_sessions()
|
487 |
+
self.stats['cleanup_runs'] += 1
|
488 |
+
|
489 |
+
time.sleep(self.cleanup_interval)
|
490 |
+
|
491 |
+
except Exception as e:
|
492 |
+
print(f"Session tracker cleanup error: {e}")
|
493 |
+
time.sleep(60)
|
494 |
+
|
495 |
+
def get_sessions(self, limit: int = 100, offset: int = 0, **filters) -> List[Dict]:
|
496 |
+
"""Get sessions with pagination and filtering"""
|
497 |
+
with self.lock:
|
498 |
+
if filters:
|
499 |
+
sessions = self.find_sessions(**filters)
|
500 |
+
else:
|
501 |
+
sessions = list(self.sessions.values())
|
502 |
+
|
503 |
+
# Sort by last activity (most recent first)
|
504 |
+
sessions.sort(key=lambda s: s.last_activity, reverse=True)
|
505 |
+
|
506 |
+
# Apply pagination
|
507 |
+
paginated_sessions = sessions[offset:offset + limit]
|
508 |
+
|
509 |
+
return [session.to_dict() for session in paginated_sessions]
|
510 |
+
|
511 |
+
def get_session_summary(self) -> Dict:
|
512 |
+
"""Get session summary statistics"""
|
513 |
+
with self.lock:
|
514 |
+
summary = {
|
515 |
+
'total_sessions': len(self.sessions),
|
516 |
+
'by_type': {},
|
517 |
+
'by_state': {},
|
518 |
+
'by_protocol': {},
|
519 |
+
'active_sessions_by_age': {
|
520 |
+
'last_hour': 0,
|
521 |
+
'last_day': 0,
|
522 |
+
'older': 0
|
523 |
+
}
|
524 |
+
}
|
525 |
+
|
526 |
+
current_time = time.time()
|
527 |
+
hour_ago = current_time - 3600
|
528 |
+
day_ago = current_time - 86400
|
529 |
+
|
530 |
+
for session in self.sessions.values():
|
531 |
+
# Count by type
|
532 |
+
session_type = session.session_type.value
|
533 |
+
summary['by_type'][session_type] = summary['by_type'].get(session_type, 0) + 1
|
534 |
+
|
535 |
+
# Count by state
|
536 |
+
session_state = session.state.value
|
537 |
+
summary['by_state'][session_state] = summary['by_state'].get(session_state, 0) + 1
|
538 |
+
|
539 |
+
# Count by protocol
|
540 |
+
if session.protocol:
|
541 |
+
summary['by_protocol'][session.protocol] = summary['by_protocol'].get(session.protocol, 0) + 1
|
542 |
+
|
543 |
+
# Count by age
|
544 |
+
if session.last_activity > hour_ago:
|
545 |
+
summary['active_sessions_by_age']['last_hour'] += 1
|
546 |
+
elif session.last_activity > day_ago:
|
547 |
+
summary['active_sessions_by_age']['last_day'] += 1
|
548 |
+
else:
|
549 |
+
summary['active_sessions_by_age']['older'] += 1
|
550 |
+
|
551 |
+
return summary
|
552 |
+
|
553 |
+
def get_stats(self) -> Dict:
|
554 |
+
"""Get tracker statistics"""
|
555 |
+
with self.lock:
|
556 |
+
stats = self.stats.copy()
|
557 |
+
stats['active_sessions'] = len(self.sessions)
|
558 |
+
|
559 |
+
return stats
|
560 |
+
|
561 |
+
def reset_stats(self):
|
562 |
+
"""Reset statistics"""
|
563 |
+
self.stats = {
|
564 |
+
'total_sessions': len(self.sessions),
|
565 |
+
'active_sessions': len(self.sessions),
|
566 |
+
'expired_sessions': 0,
|
567 |
+
'session_types': {t.value: 0 for t in SessionType},
|
568 |
+
'session_states': {s.value: 0 for s in SessionState},
|
569 |
+
'cleanup_runs': 0,
|
570 |
+
'correlations_created': 0
|
571 |
+
}
|
572 |
+
|
573 |
+
# Recalculate current counts
|
574 |
+
with self.lock:
|
575 |
+
for session in self.sessions.values():
|
576 |
+
self.stats['session_types'][session.session_type.value] += 1
|
577 |
+
self.stats['session_states'][session.state.value] += 1
|
578 |
+
|
579 |
+
def export_sessions(self, format: str = 'json') -> str:
|
580 |
+
"""Export sessions data"""
|
581 |
+
with self.lock:
|
582 |
+
sessions_data = [session.to_dict() for session in self.sessions.values()]
|
583 |
+
|
584 |
+
if format == 'json':
|
585 |
+
return json.dumps(sessions_data, indent=2, default=str)
|
586 |
+
else:
|
587 |
+
raise ValueError(f"Unsupported export format: {format}")
|
588 |
+
|
589 |
+
def start(self):
|
590 |
+
"""Start session tracker"""
|
591 |
+
self.running = True
|
592 |
+
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
|
593 |
+
self.cleanup_thread.start()
|
594 |
+
print("Session tracker started")
|
595 |
+
|
596 |
+
def stop(self):
|
597 |
+
"""Stop session tracker"""
|
598 |
+
self.running = False
|
599 |
+
if self.cleanup_thread:
|
600 |
+
self.cleanup_thread.join()
|
601 |
+
print("Session tracker stopped")
|
602 |
+
|
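A minimal driving sketch for the tracker above; the config keys, addresses and ports are illustrative, and every call used is defined in this file:

    from core.session_tracker import SessionTracker, SessionType, SessionState

    tracker = SessionTracker({'max_sessions': 1000, 'session_timeout': 600,
                              'cleanup_interval': 5})   # small interval so stop() returns promptly
    tracker.start()

    # A NAT session and the TCP connection it carries, correlated as parent/child.
    nat_id = tracker.create_session(SessionType.NAT_SESSION,
                                    virtual_ip='10.0.0.2', real_ip='203.0.113.7', protocol='TCP')
    tcp_id = tracker.create_session(SessionType.TCP_CONNECTION,
                                    virtual_ip='10.0.0.2', virtual_port=51500,
                                    real_ip='93.184.216.34', real_port=80, protocol='TCP')
    tracker.correlate_sessions(nat_id, tcp_id, relationship='parent_child')

    tracker.update_session(tcp_id, state=SessionState.ACTIVE)
    tracker.update_metrics(tcp_id, bytes_in=1500, bytes_out=400,
                           packets_in=2, packets_out=1, rtt=0.045)

    print(tracker.get_session_summary()['by_type'])
    tracker.stop()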
core/socket_translator.py
ADDED
@@ -0,0 +1,653 @@
1 |
+
"""
|
2 |
+
Socket Translator Module
|
3 |
+
|
4 |
+
Bridges virtual connections to real host sockets:
|
5 |
+
- Map virtual connections to host sockets/HTTP clients
|
6 |
+
- Bidirectional data streaming
|
7 |
+
- Connection lifecycle management
|
8 |
+
- Protocol translation (TCP/UDP to host sockets)
|
9 |
+
"""
|
10 |
+
|
11 |
+
import socket
|
12 |
+
import threading
|
13 |
+
import time
|
14 |
+
import asyncio
|
15 |
+
import aiohttp
|
16 |
+
import ssl
|
17 |
+
from typing import Dict, Optional, Callable, Tuple, Any
|
18 |
+
from dataclasses import dataclass
|
19 |
+
from enum import Enum
|
20 |
+
import urllib.parse
|
21 |
+
import json
|
22 |
+
|
23 |
+
from .tcp_engine import TCPConnection
|
24 |
+
|
25 |
+
|
26 |
+
class ConnectionType(Enum):
|
27 |
+
TCP_SOCKET = "TCP_SOCKET"
|
28 |
+
UDP_SOCKET = "UDP_SOCKET"
|
29 |
+
HTTP_CLIENT = "HTTP_CLIENT"
|
30 |
+
HTTPS_CLIENT = "HTTPS_CLIENT"
|
31 |
+
|
32 |
+
|
33 |
+
@dataclass
|
34 |
+
class SocketConnection:
|
35 |
+
"""Represents a socket connection"""
|
36 |
+
connection_id: str
|
37 |
+
connection_type: ConnectionType
|
38 |
+
virtual_connection: Optional[TCPConnection]
|
39 |
+
host_socket: Optional[socket.socket]
|
40 |
+
remote_host: str
|
41 |
+
remote_port: int
|
42 |
+
created_time: float
|
43 |
+
last_activity: float
|
44 |
+
bytes_sent: int = 0
|
45 |
+
bytes_received: int = 0
|
46 |
+
is_connected: bool = False
|
47 |
+
error_count: int = 0
|
48 |
+
|
49 |
+
def update_activity(self, bytes_transferred: int = 0, direction: str = 'sent'):
|
50 |
+
"""Update connection activity"""
|
51 |
+
self.last_activity = time.time()
|
52 |
+
if direction == 'sent':
|
53 |
+
self.bytes_sent += bytes_transferred
|
54 |
+
else:
|
55 |
+
self.bytes_received += bytes_transferred
|
56 |
+
|
57 |
+
def to_dict(self) -> Dict:
|
58 |
+
"""Convert to dictionary"""
|
59 |
+
return {
|
60 |
+
'connection_id': self.connection_id,
|
61 |
+
'connection_type': self.connection_type.value,
|
62 |
+
'remote_host': self.remote_host,
|
63 |
+
'remote_port': self.remote_port,
|
64 |
+
'created_time': self.created_time,
|
65 |
+
'last_activity': self.last_activity,
|
66 |
+
'bytes_sent': self.bytes_sent,
|
67 |
+
'bytes_received': self.bytes_received,
|
68 |
+
'is_connected': self.is_connected,
|
69 |
+
'error_count': self.error_count,
|
70 |
+
'duration': time.time() - self.created_time
|
71 |
+
}
|
72 |
+
|
73 |
+
|
74 |
+
class HTTPRequest:
|
75 |
+
"""Represents an HTTP request"""
|
76 |
+
|
77 |
+
def __init__(self, method: str = 'GET', path: str = '/', headers: Dict[str, str] = None, body: bytes = b''):
|
78 |
+
self.method = method.upper()
|
79 |
+
self.path = path
|
80 |
+
self.headers = headers or {}
|
81 |
+
self.body = body
|
82 |
+
self.version = 'HTTP/1.1'
|
83 |
+
|
84 |
+
@classmethod
|
85 |
+
def parse(cls, data: bytes) -> Optional['HTTPRequest']:
|
86 |
+
"""Parse HTTP request from raw data"""
|
87 |
+
try:
|
88 |
+
lines = data.decode('utf-8', errors='ignore').split('\r\n')
|
89 |
+
if not lines:
|
90 |
+
return None
|
91 |
+
|
92 |
+
# Parse request line
|
93 |
+
request_line = lines[0].split(' ')
|
94 |
+
if len(request_line) < 3:
|
95 |
+
return None
|
96 |
+
|
97 |
+
method, path, version = request_line[0], request_line[1], request_line[2]
|
98 |
+
|
99 |
+
# Parse headers
|
100 |
+
headers = {}
|
101 |
+
body_start = 1
|
102 |
+
for i, line in enumerate(lines[1:], 1):
|
103 |
+
if line == '':
|
104 |
+
body_start = i + 1
|
105 |
+
break
|
106 |
+
if ':' in line:
|
107 |
+
key, value = line.split(':', 1)
|
108 |
+
headers[key.strip().lower()] = value.strip()
|
109 |
+
|
110 |
+
# Parse body
|
111 |
+
body_lines = lines[body_start:]
|
112 |
+
body = '\r\n'.join(body_lines).encode('utf-8')
|
113 |
+
|
114 |
+
return cls(method, path, headers, body)
|
115 |
+
|
116 |
+
except Exception:
|
117 |
+
return None
|
118 |
+
|
119 |
+
def to_bytes(self) -> bytes:
|
120 |
+
"""Convert to raw HTTP request"""
|
121 |
+
request_line = f"{self.method} {self.path} {self.version}\r\n"
|
122 |
+
|
123 |
+
# Add default headers
|
124 |
+
if 'host' not in self.headers:
|
125 |
+
self.headers['host'] = 'localhost'
|
126 |
+
if 'user-agent' not in self.headers:
|
127 |
+
self.headers['user-agent'] = 'VirtualISP/1.0'
|
128 |
+
if self.body and 'content-length' not in self.headers:
|
129 |
+
self.headers['content-length'] = str(len(self.body))
|
130 |
+
|
131 |
+
# Build headers
|
132 |
+
header_lines = []
|
133 |
+
for key, value in self.headers.items():
|
134 |
+
header_lines.append(f"{key}: {value}\r\n")
|
135 |
+
|
136 |
+
# Combine all parts
|
137 |
+
request_data = request_line + ''.join(header_lines) + '\r\n'
|
138 |
+
return request_data.encode('utf-8') + self.body
|
139 |
+
|
140 |
+
|
141 |
+
class HTTPResponse:
|
142 |
+
"""Represents an HTTP response"""
|
143 |
+
|
144 |
+
def __init__(self, status_code: int = 200, reason: str = 'OK', headers: Dict[str, str] = None, body: bytes = b''):
|
145 |
+
self.status_code = status_code
|
146 |
+
self.reason = reason
|
147 |
+
self.headers = headers or {}
|
148 |
+
self.body = body
|
149 |
+
self.version = 'HTTP/1.1'
|
150 |
+
|
151 |
+
@classmethod
|
152 |
+
def parse(cls, data: bytes) -> Optional['HTTPResponse']:
|
153 |
+
"""Parse HTTP response from raw data"""
|
154 |
+
try:
|
155 |
+
lines = data.decode('utf-8', errors='ignore').split('\r\n')
|
156 |
+
if not lines:
|
157 |
+
return None
|
158 |
+
|
159 |
+
# Parse status line
|
160 |
+
status_line = lines[0].split(' ', 2)
|
161 |
+
if len(status_line) < 3:
|
162 |
+
return None
|
163 |
+
|
164 |
+
version, status_code, reason = status_line[0], int(status_line[1]), status_line[2]
|
165 |
+
|
166 |
+
# Parse headers
|
167 |
+
headers = {}
|
168 |
+
body_start = 1
|
169 |
+
for i, line in enumerate(lines[1:], 1):
|
170 |
+
if line == '':
|
171 |
+
body_start = i + 1
|
172 |
+
break
|
173 |
+
if ':' in line:
|
174 |
+
key, value = line.split(':', 1)
|
175 |
+
headers[key.strip().lower()] = value.strip()
|
176 |
+
|
177 |
+
# Parse body
|
178 |
+
body_lines = lines[body_start:]
|
179 |
+
body = '\r\n'.join(body_lines).encode('utf-8')
|
180 |
+
|
181 |
+
return cls(status_code, reason, headers, body)
|
182 |
+
|
183 |
+
except Exception:
|
184 |
+
return None
|
185 |
+
|
186 |
+
def to_bytes(self) -> bytes:
|
187 |
+
"""Convert to raw HTTP response"""
|
188 |
+
status_line = f"{self.version} {self.status_code} {self.reason}\r\n"
|
189 |
+
|
190 |
+
# Add default headers
|
191 |
+
if 'content-length' not in self.headers and self.body:
|
192 |
+
self.headers['content-length'] = str(len(self.body))
|
193 |
+
if 'server' not in self.headers:
|
194 |
+
self.headers['server'] = 'VirtualISP/1.0'
|
195 |
+
|
196 |
+
# Build headers
|
197 |
+
header_lines = []
|
198 |
+
for key, value in self.headers.items():
|
199 |
+
header_lines.append(f"{key}: {value}\r\n")
|
200 |
+
|
201 |
+
# Combine all parts
|
202 |
+
response_data = status_line + ''.join(header_lines) + '\r\n'
|
203 |
+
return response_data.encode('utf-8') + self.body
|
204 |
+
|
205 |
+
|
206 |
+
class SocketTranslator:
|
207 |
+
"""Socket translator implementation"""
|
208 |
+
|
209 |
+
def __init__(self, config: Dict):
|
210 |
+
self.config = config
|
211 |
+
self.connections: Dict[str, SocketConnection] = {}
|
212 |
+
self.lock = threading.RLock()  # re-entrant: cleanup and error paths may call _close_connection while the lock is held
|
213 |
+
|
214 |
+
# Configuration
|
215 |
+
self.connect_timeout = config.get('connect_timeout', 10)
|
216 |
+
self.read_timeout = config.get('read_timeout', 30)
|
217 |
+
self.max_connections = config.get('max_connections', 1000)
|
218 |
+
self.buffer_size = config.get('buffer_size', 8192)
|
219 |
+
|
220 |
+
# HTTP client session
|
221 |
+
self.http_session = None
|
222 |
+
self.loop = None
|
223 |
+
|
224 |
+
# Statistics
|
225 |
+
self.stats = {
|
226 |
+
'total_connections': 0,
|
227 |
+
'active_connections': 0,
|
228 |
+
'failed_connections': 0,
|
229 |
+
'bytes_transferred': 0,
|
230 |
+
'http_requests': 0,
|
231 |
+
'tcp_connections': 0,
|
232 |
+
'udp_connections': 0
|
233 |
+
}
|
234 |
+
|
235 |
+
# Background tasks
|
236 |
+
self.running = False
|
237 |
+
self.cleanup_thread = None
|
238 |
+
|
239 |
+
async def _init_http_session(self):
|
240 |
+
"""Initialize HTTP client session"""
|
241 |
+
connector = aiohttp.TCPConnector(
|
242 |
+
limit=100,
|
243 |
+
limit_per_host=10,
|
244 |
+
ttl_dns_cache=300,
|
245 |
+
use_dns_cache=True,
|
246 |
+
)
|
247 |
+
|
248 |
+
timeout = aiohttp.ClientTimeout(
|
249 |
+
total=self.read_timeout,
|
250 |
+
connect=self.connect_timeout
|
251 |
+
)
|
252 |
+
|
253 |
+
self.http_session = aiohttp.ClientSession(
|
254 |
+
connector=connector,
|
255 |
+
timeout=timeout,
|
256 |
+
headers={'User-Agent': 'VirtualISP/1.0'}
|
257 |
+
)
|
258 |
+
|
259 |
+
def _is_http_request(self, data: bytes) -> bool:
|
260 |
+
"""Check if data looks like an HTTP request"""
|
261 |
+
try:
|
262 |
+
first_line = data.split(b'\r\n')[0].decode('utf-8', errors='ignore')
|
263 |
+
methods = ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS', 'PATCH', 'TRACE']
|
264 |
+
return any(first_line.startswith(method + ' ') for method in methods)
|
265 |
+
except Exception:
|
266 |
+
return False
|
267 |
+
|
268 |
+
def _determine_connection_type(self, remote_host: str, remote_port: int, data: bytes = b'') -> ConnectionType:
|
269 |
+
"""Determine the appropriate connection type"""
|
270 |
+
# Check for HTTP/HTTPS based on port and data
|
271 |
+
if remote_port == 80 or (data and self._is_http_request(data)):
|
272 |
+
return ConnectionType.HTTP_CLIENT
|
273 |
+
elif remote_port == 443:
|
274 |
+
return ConnectionType.HTTPS_CLIENT
|
275 |
+
else:
|
276 |
+
return ConnectionType.TCP_SOCKET
|
277 |
+
|
278 |
+
def create_connection(self, virtual_conn: TCPConnection, remote_host: str, remote_port: int,
|
279 |
+
initial_data: bytes = b'') -> Optional[SocketConnection]:
|
280 |
+
"""Create a new socket connection"""
|
281 |
+
connection_id = f"{virtual_conn.connection_id}->{remote_host}:{remote_port}"
|
282 |
+
|
283 |
+
# Check connection limit
|
284 |
+
with self.lock:
|
285 |
+
if len(self.connections) >= self.max_connections:
|
286 |
+
return None
|
287 |
+
|
288 |
+
# Determine connection type
|
289 |
+
conn_type = self._determine_connection_type(remote_host, remote_port, initial_data)
|
290 |
+
|
291 |
+
# Create socket connection
|
292 |
+
socket_conn = SocketConnection(
|
293 |
+
connection_id=connection_id,
|
294 |
+
connection_type=conn_type,
|
295 |
+
virtual_connection=virtual_conn,
|
296 |
+
host_socket=None,
|
297 |
+
remote_host=remote_host,
|
298 |
+
remote_port=remote_port,
|
299 |
+
created_time=time.time(),
|
300 |
+
last_activity=time.time()
|
301 |
+
)
|
302 |
+
|
303 |
+
with self.lock:
|
304 |
+
self.connections[connection_id] = socket_conn
|
305 |
+
|
306 |
+
# Establish connection based on type
|
307 |
+
if conn_type in [ConnectionType.HTTP_CLIENT, ConnectionType.HTTPS_CLIENT]:
|
308 |
+
success = self._create_http_connection(socket_conn, initial_data)
|
309 |
+
else:
|
310 |
+
success = self._create_tcp_connection(socket_conn, initial_data)
|
311 |
+
|
312 |
+
if success:
|
313 |
+
self.stats['total_connections'] += 1
|
314 |
+
self.stats['active_connections'] = len(self.connections)
|
315 |
+
|
316 |
+
if conn_type in [ConnectionType.HTTP_CLIENT, ConnectionType.HTTPS_CLIENT]:
|
317 |
+
self.stats['http_requests'] += 1
|
318 |
+
else:
|
319 |
+
self.stats['tcp_connections'] += 1
|
320 |
+
else:
|
321 |
+
self.stats['failed_connections'] += 1
|
322 |
+
with self.lock:
|
323 |
+
if connection_id in self.connections:
|
324 |
+
del self.connections[connection_id]
|
325 |
+
return None
|
326 |
+
|
327 |
+
return socket_conn
|
328 |
+
|
329 |
+
def _create_tcp_connection(self, socket_conn: SocketConnection, initial_data: bytes) -> bool:
|
330 |
+
"""Create TCP socket connection"""
|
331 |
+
try:
|
332 |
+
# Create socket
|
333 |
+
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
334 |
+
sock.settimeout(self.connect_timeout)
|
335 |
+
|
336 |
+
# Connect
|
337 |
+
sock.connect((socket_conn.remote_host, socket_conn.remote_port))
|
338 |
+
sock.settimeout(self.read_timeout)
|
339 |
+
|
340 |
+
socket_conn.host_socket = sock
|
341 |
+
socket_conn.is_connected = True
|
342 |
+
|
343 |
+
# Send initial data if any
|
344 |
+
if initial_data:
|
345 |
+
sock.sendall(initial_data)
|
346 |
+
socket_conn.update_activity(len(initial_data), 'sent')
|
347 |
+
|
348 |
+
# Start background thread for receiving data
|
349 |
+
thread = threading.Thread(
|
350 |
+
target=self._tcp_receive_loop,
|
351 |
+
args=(socket_conn,),
|
352 |
+
daemon=True
|
353 |
+
)
|
354 |
+
thread.start()
|
355 |
+
|
356 |
+
return True
|
357 |
+
|
358 |
+
except Exception as e:
|
359 |
+
print(f"Failed to create TCP connection to {socket_conn.remote_host}:{socket_conn.remote_port}: {e}")
|
360 |
+
socket_conn.error_count += 1
|
361 |
+
return False
|
362 |
+
|
363 |
+
def _create_http_connection(self, socket_conn: SocketConnection, initial_data: bytes) -> bool:
|
364 |
+
"""Create HTTP connection"""
|
365 |
+
try:
|
366 |
+
# Parse HTTP request
|
367 |
+
http_request = HTTPRequest.parse(initial_data)
|
368 |
+
if not http_request:
|
369 |
+
return False
|
370 |
+
|
371 |
+
# Set host header
|
372 |
+
http_request.headers['host'] = socket_conn.remote_host
|
373 |
+
|
374 |
+
# Start async HTTP request
|
375 |
+
if self.loop and not self.loop.is_closed():
|
376 |
+
asyncio.run_coroutine_threadsafe(
|
377 |
+
self._handle_http_request(socket_conn, http_request),
|
378 |
+
self.loop
|
379 |
+
)
|
380 |
+
else:
|
381 |
+
# Fallback to sync HTTP handling
|
382 |
+
return self._handle_http_request_sync(socket_conn, http_request)
|
383 |
+
|
384 |
+
return True
|
385 |
+
|
386 |
+
except Exception as e:
|
387 |
+
print(f"Failed to create HTTP connection to {socket_conn.remote_host}:{socket_conn.remote_port}: {e}")
|
388 |
+
socket_conn.error_count += 1
|
389 |
+
return False
|
390 |
+
|
391 |
+
async def _handle_http_request(self, socket_conn: SocketConnection, http_request: HTTPRequest):
|
392 |
+
"""Handle HTTP request asynchronously"""
|
393 |
+
try:
|
394 |
+
if not self.http_session:
|
395 |
+
await self._init_http_session()
|
396 |
+
|
397 |
+
# Build URL
|
398 |
+
scheme = 'https' if socket_conn.connection_type == ConnectionType.HTTPS_CLIENT else 'http'
|
399 |
+
url = f"{scheme}://{socket_conn.remote_host}:{socket_conn.remote_port}{http_request.path}"
|
400 |
+
|
401 |
+
# Make request
|
402 |
+
async with self.http_session.request(
|
403 |
+
method=http_request.method,
|
404 |
+
url=url,
|
405 |
+
headers=http_request.headers,
|
406 |
+
data=http_request.body
|
407 |
+
) as response:
|
408 |
+
# Read response
|
409 |
+
response_body = await response.read()
|
410 |
+
|
411 |
+
# Create HTTP response
|
412 |
+
http_response = HTTPResponse(
|
413 |
+
status_code=response.status,
|
414 |
+
reason=response.reason or 'OK',
|
415 |
+
headers=dict(response.headers),
|
416 |
+
body=response_body
|
417 |
+
)
|
418 |
+
|
419 |
+
# Send response back to virtual connection
|
420 |
+
response_data = http_response.to_bytes()
|
421 |
+
if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received:
|
422 |
+
socket_conn.virtual_connection.on_data_received(response_data)
|
423 |
+
|
424 |
+
socket_conn.update_activity(len(response_data), 'received')
|
425 |
+
self.stats['bytes_transferred'] += len(response_data)
|
426 |
+
|
427 |
+
except Exception as e:
|
428 |
+
print(f"HTTP request failed: {e}")
|
429 |
+
socket_conn.error_count += 1
|
430 |
+
|
431 |
+
# Send error response
|
432 |
+
error_response = HTTPResponse(
|
433 |
+
status_code=500,
|
434 |
+
reason='Internal Server Error',
|
435 |
+
body=f"Error: {str(e)}".encode('utf-8')
|
436 |
+
)
|
437 |
+
|
438 |
+
response_data = error_response.to_bytes()
|
439 |
+
if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received:
|
440 |
+
socket_conn.virtual_connection.on_data_received(response_data)
|
441 |
+
|
442 |
+
def _handle_http_request_sync(self, socket_conn: SocketConnection, http_request: HTTPRequest) -> bool:
|
443 |
+
"""Handle HTTP request synchronously (fallback)"""
|
444 |
+
try:
|
445 |
+
# Use urllib for sync HTTP requests
|
446 |
+
scheme = 'https' if socket_conn.connection_type == ConnectionType.HTTPS_CLIENT else 'http'
|
447 |
+
url = f"{scheme}://{socket_conn.remote_host}:{socket_conn.remote_port}{http_request.path}"
|
448 |
+
|
449 |
+
import urllib.request
|
450 |
+
import urllib.error
|
451 |
+
|
452 |
+
# Create request
|
453 |
+
req = urllib.request.Request(
|
454 |
+
url,
|
455 |
+
data=http_request.body if http_request.body else None,
|
456 |
+
headers=http_request.headers,
|
457 |
+
method=http_request.method
|
458 |
+
)
|
459 |
+
|
460 |
+
# Make request
|
461 |
+
with urllib.request.urlopen(req, timeout=self.read_timeout) as response:
|
462 |
+
response_body = response.read()
|
463 |
+
|
464 |
+
# Create HTTP response
|
465 |
+
http_response = HTTPResponse(
|
466 |
+
status_code=response.getcode(),
|
467 |
+
reason='OK',
|
468 |
+
headers=dict(response.headers),
|
469 |
+
body=response_body
|
470 |
+
)
|
471 |
+
|
472 |
+
# Send response back to virtual connection
|
473 |
+
response_data = http_response.to_bytes()
|
474 |
+
if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received:
|
475 |
+
socket_conn.virtual_connection.on_data_received(response_data)
|
476 |
+
|
477 |
+
socket_conn.update_activity(len(response_data), 'received')
|
478 |
+
self.stats['bytes_transferred'] += len(response_data)
|
479 |
+
|
480 |
+
return True
|
481 |
+
|
482 |
+
except Exception as e:
|
483 |
+
print(f"Sync HTTP request failed: {e}")
|
484 |
+
socket_conn.error_count += 1
|
485 |
+
return False
|
486 |
+
|
487 |
+
def _tcp_receive_loop(self, socket_conn: SocketConnection):
|
488 |
+
"""Background loop for receiving TCP data"""
|
489 |
+
sock = socket_conn.host_socket
|
490 |
+
if not sock:
|
491 |
+
return
|
492 |
+
|
493 |
+
try:
|
494 |
+
while socket_conn.is_connected:
|
495 |
+
try:
|
496 |
+
data = sock.recv(self.buffer_size)
|
497 |
+
if not data:
|
498 |
+
break
|
499 |
+
|
500 |
+
# Forward data to virtual connection
|
501 |
+
if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received:
|
502 |
+
socket_conn.virtual_connection.on_data_received(data)
|
503 |
+
|
504 |
+
socket_conn.update_activity(len(data), 'received')
|
505 |
+
self.stats['bytes_transferred'] += len(data)
|
506 |
+
|
507 |
+
except socket.timeout:
|
508 |
+
continue
|
509 |
+
except Exception as e:
|
510 |
+
print(f"TCP receive error: {e}")
|
511 |
+
break
|
512 |
+
|
513 |
+
finally:
|
514 |
+
self._close_connection(socket_conn.connection_id)
|
515 |
+
|
516 |
+
def send_data(self, connection_id: str, data: bytes) -> bool:
|
517 |
+
"""Send data through socket connection"""
|
518 |
+
with self.lock:
|
519 |
+
socket_conn = self.connections.get(connection_id)
|
520 |
+
|
521 |
+
if not socket_conn or not socket_conn.is_connected:
|
522 |
+
return False
|
523 |
+
|
524 |
+
try:
|
525 |
+
if socket_conn.connection_type in [ConnectionType.HTTP_CLIENT, ConnectionType.HTTPS_CLIENT]:
|
526 |
+
# For HTTP connections, treat as new request
|
527 |
+
return self._create_http_connection(socket_conn, data)
|
528 |
+
else:
|
529 |
+
# TCP connection
|
530 |
+
if socket_conn.host_socket:
|
531 |
+
socket_conn.host_socket.sendall(data)
|
532 |
+
socket_conn.update_activity(len(data), 'sent')
|
533 |
+
self.stats['bytes_transferred'] += len(data)
|
534 |
+
return True
|
535 |
+
|
536 |
+
except Exception as e:
|
537 |
+
print(f"Failed to send data: {e}")
|
538 |
+
socket_conn.error_count += 1
|
539 |
+
self._close_connection(connection_id)
|
540 |
+
|
541 |
+
return False
|
542 |
+
|
543 |
+
def _close_connection(self, connection_id: str):
|
544 |
+
"""Close socket connection"""
|
545 |
+
with self.lock:
|
546 |
+
socket_conn = self.connections.get(connection_id)
|
547 |
+
if not socket_conn:
|
548 |
+
return
|
549 |
+
|
550 |
+
# Close socket
|
551 |
+
if socket_conn.host_socket:
|
552 |
+
try:
|
553 |
+
socket_conn.host_socket.close()
|
554 |
+
except Exception:
|
555 |
+
pass
|
556 |
+
|
557 |
+
socket_conn.is_connected = False
|
558 |
+
|
559 |
+
# Remove from connections
|
560 |
+
del self.connections[connection_id]
|
561 |
+
|
562 |
+
self.stats['active_connections'] = len(self.connections)
|
563 |
+
|
564 |
+
def close_connection(self, connection_id: str) -> bool:
|
565 |
+
"""Manually close connection"""
|
566 |
+
self._close_connection(connection_id)
|
567 |
+
return True
|
568 |
+
|
569 |
+
def get_connection(self, connection_id: str) -> Optional[SocketConnection]:
|
570 |
+
"""Get socket connection"""
|
571 |
+
with self.lock:
|
572 |
+
return self.connections.get(connection_id)
|
573 |
+
|
574 |
+
def get_connections(self) -> Dict[str, Dict]:
|
575 |
+
"""Get all socket connections"""
|
576 |
+
with self.lock:
|
577 |
+
return {
|
578 |
+
conn_id: conn.to_dict()
|
579 |
+
for conn_id, conn in self.connections.items()
|
580 |
+
}
|
581 |
+
|
582 |
+
def get_stats(self) -> Dict:
|
583 |
+
"""Get socket translator statistics"""
|
584 |
+
with self.lock:
|
585 |
+
stats = self.stats.copy()
|
586 |
+
stats['active_connections'] = len(self.connections)
|
587 |
+
|
588 |
+
return stats
|
589 |
+
|
590 |
+
def _cleanup_loop(self):
|
591 |
+
"""Background cleanup loop"""
|
592 |
+
while self.running:
|
593 |
+
try:
|
594 |
+
current_time = time.time()
|
595 |
+
expired_connections = []
|
596 |
+
|
597 |
+
with self.lock:
|
598 |
+
for conn_id, conn in self.connections.items():
|
599 |
+
# Close connections that have been inactive too long
|
600 |
+
if current_time - conn.last_activity > self.read_timeout * 2:
|
601 |
+
expired_connections.append(conn_id)
|
602 |
+
|
603 |
+
for conn_id in expired_connections:
|
604 |
+
self._close_connection(conn_id)
|
605 |
+
|
606 |
+
time.sleep(30) # Cleanup every 30 seconds
|
607 |
+
|
608 |
+
except Exception as e:
|
609 |
+
print(f"Socket translator cleanup error: {e}")
|
610 |
+
time.sleep(5)
|
611 |
+
|
612 |
+
def start(self):
|
613 |
+
"""Start socket translator"""
|
614 |
+
self.running = True
|
615 |
+
|
616 |
+
# Start event loop for async HTTP
|
617 |
+
try:
|
618 |
+
self.loop = asyncio.new_event_loop()
|
619 |
+
asyncio.set_event_loop(self.loop)
|
620 |
+
|
621 |
+
# Start cleanup thread
|
622 |
+
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
|
623 |
+
self.cleanup_thread.start()
|
624 |
+
|
625 |
+
print("Socket translator started")
|
626 |
+
except Exception as e:
|
627 |
+
print(f"Failed to start socket translator: {e}")
|
628 |
+
|
629 |
+
def stop(self):
|
630 |
+
"""Stop socket translator"""
|
631 |
+
self.running = False
|
632 |
+
|
633 |
+
# Close all connections
|
634 |
+
with self.lock:
|
635 |
+
connection_ids = list(self.connections.keys())
|
636 |
+
|
637 |
+
for conn_id in connection_ids:
|
638 |
+
self._close_connection(conn_id)
|
639 |
+
|
640 |
+
# Close HTTP session
|
641 |
+
if self.http_session:
|
642 |
+
asyncio.run_coroutine_threadsafe(self.http_session.close(), self.loop)
|
643 |
+
|
644 |
+
# Close event loop
|
645 |
+
if self.loop and not self.loop.is_closed():
|
646 |
+
self.loop.call_soon_threadsafe(self.loop.stop)
|
647 |
+
|
648 |
+
# Wait for cleanup thread
|
649 |
+
if self.cleanup_thread:
|
650 |
+
self.cleanup_thread.join()
|
651 |
+
|
652 |
+
print("Socket translator stopped")
|
653 |
+
|
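The translator's HTTP path leans on the HTTPRequest/HTTPResponse helpers above; a self-contained sketch of that parse/serialise round-trip (values are illustrative):

    from core.socket_translator import HTTPRequest, HTTPResponse

    raw = (b"GET /status HTTP/1.1\r\n"
           b"Host: example.com\r\n"
           b"\r\n")
    req = HTTPRequest.parse(raw)
    print(req.method, req.path, req.headers)   # GET /status {'host': 'example.com'}

    resp = HTTPResponse(status_code=204, reason='No Content')
    print(resp.to_bytes())                     # adds the default Server header before serialising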
core/tcp_engine.py
ADDED
@@ -0,0 +1,716 @@
"""
TCP Engine Module

Implements a complete TCP state machine in user-space:
- Full TCP state machine (SYN, SYN-ACK, ESTABLISHED, FIN, RST)
- Sequence and acknowledgment number tracking
- Sliding window implementation
- Retransmission and timeout handling
- Congestion control
"""

import time
import threading
import random
from typing import Dict, List, Optional, Tuple, Callable
from dataclasses import dataclass, field
from enum import Enum
from collections import deque

from .ip_parser import TCPHeader, IPv4Header, IPParser


class TCPState(Enum):
    CLOSED = "CLOSED"
    LISTEN = "LISTEN"
    SYN_SENT = "SYN_SENT"
    SYN_RECEIVED = "SYN_RECEIVED"
    ESTABLISHED = "ESTABLISHED"
    FIN_WAIT_1 = "FIN_WAIT_1"
    FIN_WAIT_2 = "FIN_WAIT_2"
    CLOSE_WAIT = "CLOSE_WAIT"
    CLOSING = "CLOSING"
    LAST_ACK = "LAST_ACK"
    TIME_WAIT = "TIME_WAIT"


@dataclass
class TCPSegment:
    """Represents a TCP segment"""
    seq_num: int
    ack_num: int
    flags: int
    window: int
    data: bytes
    timestamp: float = field(default_factory=time.time)
    retransmit_count: int = 0

    @property
    def data_length(self) -> int:
        """Get data length"""
        return len(self.data)

    @property
    def seq_end(self) -> int:
        """Get sequence number after this segment"""
        length = self.data_length
        # SYN and FIN consume one sequence number
        if self.flags & 0x02:  # SYN
            length += 1
        if self.flags & 0x01:  # FIN
            length += 1
        return self.seq_num + length


@dataclass
class TCPConnection:
    """Represents a TCP connection state"""
    # Connection identification
    local_ip: str
    local_port: int
    remote_ip: str
    remote_port: int

    # State
    state: TCPState = TCPState.CLOSED

    # Sequence numbers
    local_seq: int = 0
    local_ack: int = 0
    remote_seq: int = 0
    remote_ack: int = 0
    initial_seq: int = 0

    # Window management
    local_window: int = 65535
    remote_window: int = 65535
    window_scale: int = 0

    # Buffers
    send_buffer: deque = field(default_factory=deque)
    recv_buffer: deque = field(default_factory=deque)
    out_of_order_buffer: Dict[int, bytes] = field(default_factory=dict)

    # Retransmission
    unacked_segments: Dict[int, TCPSegment] = field(default_factory=dict)
    retransmit_timer: Optional[float] = None
    rto: float = 1.0  # Retransmission timeout
    srtt: float = 0.0  # Smoothed round-trip time
    rttvar: float = 0.0  # Round-trip time variation

    # Congestion control
    cwnd: int = 1  # Congestion window (in MSS units)
    ssthresh: int = 65535  # Slow start threshold
    mss: int = 1460  # Maximum segment size

    # Timers
    last_activity: float = field(default_factory=time.time)
    time_wait_start: Optional[float] = None

    # Callbacks
    on_data_received: Optional[Callable[[bytes], None]] = None
    on_connection_closed: Optional[Callable[[], None]] = None

    @property
    def connection_id(self) -> str:
        """Get unique connection identifier"""
        return f"{self.local_ip}:{self.local_port}-{self.remote_ip}:{self.remote_port}"

    @property
    def is_established(self) -> bool:
        """Check if connection is established"""
        return self.state == TCPState.ESTABLISHED

    @property
    def can_send_data(self) -> bool:
        """Check if connection can send data"""
        return self.state in [TCPState.ESTABLISHED, TCPState.CLOSE_WAIT]

    @property
    def effective_window(self) -> int:
        """Get effective send window"""
        return min(self.remote_window, self.cwnd * self.mss)


class TCPEngine:
    """TCP state machine implementation"""

    def __init__(self, config: Dict):
        self.config = config
        self.connections: Dict[str, TCPConnection] = {}
        self.listening_ports: Dict[int, Callable] = {}  # port -> accept callback
        self.lock = threading.Lock()
        self.running = False
        self.timer_thread = None

        # Default configuration
        self.default_mss = config.get('mss', 1460)
        self.default_window = config.get('initial_window', 65535)
        self.max_retries = config.get('max_retries', 3)
        self.connection_timeout = config.get('timeout', 300)
        self.time_wait_timeout = config.get('time_wait_timeout', 120)

    def _generate_isn(self) -> int:
        """Generate Initial Sequence Number"""
        return random.randint(0, 0xFFFFFFFF)

    def _get_connection_key(self, local_ip: str, local_port: int, remote_ip: str, remote_port: int) -> str:
        """Get connection key"""
        return f"{local_ip}:{local_port}-{remote_ip}:{remote_port}"

    def _create_tcp_segment(self, conn: TCPConnection, flags: int, data: bytes = b'') -> TCPSegment:
        """Create TCP segment"""
        segment = TCPSegment(
            seq_num=conn.local_seq,
            ack_num=conn.local_ack,
            flags=flags,
            window=conn.local_window,
            data=data
        )
        return segment

    def _build_tcp_packet(self, conn: TCPConnection, segment: TCPSegment) -> bytes:
        """Build complete TCP packet"""
        # Create IP header
        ip_header = IPv4Header(
            protocol=6,  # TCP
            source_ip=conn.local_ip,
            dest_ip=conn.remote_ip,
            ttl=64
        )

        # Create TCP header
        tcp_header = TCPHeader(
            source_port=conn.local_port,
            dest_port=conn.remote_port,
            seq_num=segment.seq_num,
            ack_num=segment.ack_num,
            flags=segment.flags,
            window_size=segment.window
        )

        # Build packet
        return IPParser.build_packet(ip_header, tcp_header, segment.data)

    def _update_rto(self, conn: TCPConnection, rtt: float):
        """Update retransmission timeout using RFC 6298"""
        if conn.srtt == 0:
            # First RTT measurement
            conn.srtt = rtt
            conn.rttvar = rtt / 2
        else:
            # Subsequent measurements
            alpha = 0.125
            beta = 0.25
            conn.rttvar = (1 - beta) * conn.rttvar + beta * abs(conn.srtt - rtt)
            conn.srtt = (1 - alpha) * conn.srtt + alpha * rtt

        # Calculate RTO
        conn.rto = max(1.0, conn.srtt + 4 * conn.rttvar)
        conn.rto = min(conn.rto, 60.0)  # Cap at 60 seconds

    def _update_congestion_window(self, conn: TCPConnection, acked_bytes: int):
        """Update congestion window (simplified congestion control)"""
        if conn.cwnd < conn.ssthresh:
            # Slow start
            conn.cwnd += 1
        else:
            # Congestion avoidance
            conn.cwnd += max(1, conn.mss * conn.mss // conn.cwnd)

    def _handle_retransmission(self, conn: TCPConnection):
        """Handle segment retransmission"""
        current_time = time.time()

        # Find segments that need retransmission
        to_retransmit = []
        for seq_num, segment in conn.unacked_segments.items():
            if current_time - segment.timestamp > conn.rto:
                if segment.retransmit_count < self.max_retries:
                    to_retransmit.append(segment)
                else:
                    # Max retries exceeded, close connection
                    self._close_connection(conn, reset=True)
                    return

        # Retransmit segments
        for segment in to_retransmit:
            segment.retransmit_count += 1
            segment.timestamp = current_time

            # Exponential backoff
            conn.rto = min(conn.rto * 2, 60.0)

            # Congestion control: reduce window
            conn.ssthresh = max(conn.cwnd // 2, 2)
            conn.cwnd = 1

            # Send retransmitted segment
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

    def _send_packet(self, packet: bytes):
        """Send packet (to be implemented by integration layer)"""
        # This will be connected to the packet bridge
        pass

    def _close_connection(self, conn: TCPConnection, reset: bool = False):
        """Close connection"""
        if reset:
            # Send RST
            segment = self._create_tcp_segment(conn, 0x04)  # RST flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)
            conn.state = TCPState.CLOSED
        else:
            # Normal close
            if conn.state == TCPState.ESTABLISHED:
                # Send FIN
                segment = self._create_tcp_segment(conn, 0x01)  # FIN flag
                packet = self._build_tcp_packet(conn, segment)
                self._send_packet(packet)
                conn.local_seq += 1
                conn.state = TCPState.FIN_WAIT_1

        # Cleanup if closed
        if conn.state == TCPState.CLOSED:
            if conn.on_connection_closed:
                conn.on_connection_closed()

            with self.lock:
                if conn.connection_id in self.connections:
                    del self.connections[conn.connection_id]

    def listen(self, port: int, accept_callback: Callable):
        """Listen on port for incoming connections"""
        with self.lock:
            self.listening_ports[port] = accept_callback

    def connect(self, local_ip: str, local_port: int, remote_ip: str, remote_port: int) -> Optional[TCPConnection]:
        """Initiate outbound connection"""
        conn_key = self._get_connection_key(local_ip, local_port, remote_ip, remote_port)

        # Create connection
        conn = TCPConnection(
            local_ip=local_ip,
            local_port=local_port,
            remote_ip=remote_ip,
            remote_port=remote_port,
            state=TCPState.SYN_SENT,
            local_seq=self._generate_isn(),
            mss=self.default_mss,
            local_window=self.default_window
        )
        conn.initial_seq = conn.local_seq

        with self.lock:
            self.connections[conn_key] = conn

        # Send SYN
        segment = self._create_tcp_segment(conn, 0x02)  # SYN flag
        packet = self._build_tcp_packet(conn, segment)
        self._send_packet(packet)

        # Track unacked segment
        conn.unacked_segments[conn.local_seq] = segment
        conn.local_seq += 1
        conn.retransmit_timer = time.time()

        return conn

    def send_data(self, conn: TCPConnection, data: bytes) -> bool:
        """Send data on established connection"""
        if not conn.can_send_data:
            return False

        # Add to send buffer
        conn.send_buffer.append(data)

        # Try to send immediately
        self._try_send_data(conn)

        return True

    def _try_send_data(self, conn: TCPConnection):
        """Try to send buffered data"""
        while conn.send_buffer and len(conn.unacked_segments) * conn.mss < conn.effective_window:
            data = conn.send_buffer.popleft()

            # Split data if larger than MSS
            while data:
                chunk = data[:conn.mss]
                data = data[conn.mss:]

                # Create and send segment
                segment = self._create_tcp_segment(conn, 0x18, chunk)  # PSH+ACK flags
                packet = self._build_tcp_packet(conn, segment)
                self._send_packet(packet)

                # Track unacked segment
                conn.unacked_segments[conn.local_seq] = segment
                conn.local_seq += len(chunk)

                if not data:
                    break

    def process_packet(self, packet_data: bytes) -> bool:
        """Process incoming TCP packet"""
        try:
            # Parse packet
            parsed = IPParser.parse_packet(packet_data)
            if not isinstance(parsed.transport_header, TCPHeader):
                return False

            ip_header = parsed.ip_header
            tcp_header = parsed.transport_header
            payload = parsed.payload

            # Find or create connection
            conn_key = self._get_connection_key(
                ip_header.dest_ip, tcp_header.dest_port,
                ip_header.source_ip, tcp_header.source_port
            )

            with self.lock:
                conn = self.connections.get(conn_key)

            # Handle new connection (SYN to listening port)
            if not conn and tcp_header.syn and not tcp_header.ack:
                if tcp_header.dest_port in self.listening_ports:
                    conn = self._handle_new_connection(ip_header, tcp_header)
                    if conn:
                        self.connections[conn_key] = conn

            if not conn:
                # Send RST for unknown connection
                self._send_rst(ip_header, tcp_header)
                return False

            # Process segment
            return self._process_segment(conn, tcp_header, payload)

        except Exception as e:
            print(f"Error processing TCP packet: {e}")
            return False

    def _handle_new_connection(self, ip_header: IPv4Header, tcp_header: TCPHeader) -> Optional[TCPConnection]:
        """Handle new incoming connection"""
        accept_callback = self.listening_ports.get(tcp_header.dest_port)
        if not accept_callback:
            return None

        # Create connection
        conn = TCPConnection(
            local_ip=ip_header.dest_ip,
            local_port=tcp_header.dest_port,
            remote_ip=ip_header.source_ip,
            remote_port=tcp_header.source_port,
            state=TCPState.SYN_RECEIVED,
            local_seq=self._generate_isn(),
            remote_seq=tcp_header.seq_num,
            local_ack=tcp_header.seq_num + 1,
            mss=self.default_mss,
            local_window=self.default_window
        )
        conn.initial_seq = conn.local_seq

        # Send SYN-ACK
        segment = self._create_tcp_segment(conn, 0x12)  # SYN+ACK flags
        packet = self._build_tcp_packet(conn, segment)
        self._send_packet(packet)

        # Track unacked segment
        conn.unacked_segments[conn.local_seq] = segment
        conn.local_seq += 1
        conn.retransmit_timer = time.time()

        # Call accept callback
        accept_callback(conn)

        return conn

    def _process_segment(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Process TCP segment based on connection state"""
        conn.last_activity = time.time()

        # Handle RST
        if tcp_header.rst:
            conn.state = TCPState.CLOSED
            self._close_connection(conn)
            return True

        # State machine
        if conn.state == TCPState.SYN_SENT:
            return self._handle_syn_sent(conn, tcp_header, payload)
        elif conn.state == TCPState.SYN_RECEIVED:
            return self._handle_syn_received(conn, tcp_header, payload)
        elif conn.state == TCPState.ESTABLISHED:
            return self._handle_established(conn, tcp_header, payload)
        elif conn.state == TCPState.FIN_WAIT_1:
            return self._handle_fin_wait_1(conn, tcp_header, payload)
        elif conn.state == TCPState.FIN_WAIT_2:
            return self._handle_fin_wait_2(conn, tcp_header, payload)
        elif conn.state == TCPState.CLOSE_WAIT:
            return self._handle_close_wait(conn, tcp_header, payload)
        elif conn.state == TCPState.CLOSING:
            return self._handle_closing(conn, tcp_header, payload)
        elif conn.state == TCPState.LAST_ACK:
            return self._handle_last_ack(conn, tcp_header, payload)
        elif conn.state == TCPState.TIME_WAIT:
            return self._handle_time_wait(conn, tcp_header, payload)

        return False

    def _handle_syn_sent(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in SYN_SENT state"""
        if tcp_header.syn and tcp_header.ack:
            # SYN-ACK received
            if tcp_header.ack_num == conn.local_seq:
                conn.remote_seq = tcp_header.seq_num
                conn.local_ack = tcp_header.seq_num + 1
                conn.remote_window = tcp_header.window_size

                # Remove SYN from unacked segments
                if conn.local_seq - 1 in conn.unacked_segments:
                    del conn.unacked_segments[conn.local_seq - 1]

                # Send ACK
                segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
                packet = self._build_tcp_packet(conn, segment)
                self._send_packet(packet)

                conn.state = TCPState.ESTABLISHED
                return True

        return False

    def _handle_syn_received(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in SYN_RECEIVED state"""
        if tcp_header.ack and tcp_header.ack_num == conn.local_seq:
            # ACK for our SYN-ACK
            conn.remote_window = tcp_header.window_size

            # Remove SYN-ACK from unacked segments
            if conn.local_seq - 1 in conn.unacked_segments:
                del conn.unacked_segments[conn.local_seq - 1]

            conn.state = TCPState.ESTABLISHED
            return True

        return False

    def _handle_established(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in ESTABLISHED state"""
        # Handle ACK
        if tcp_header.ack:
            self._process_ack(conn, tcp_header.ack_num)

        # Handle data
        if payload and tcp_header.seq_num == conn.local_ack:
            conn.local_ack += len(payload)

            # Deliver data
            if conn.on_data_received:
                conn.on_data_received(payload)

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

        # Handle FIN
        if tcp_header.fin:
            conn.local_ack += 1

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

            conn.state = TCPState.CLOSE_WAIT

        return True

    def _handle_fin_wait_1(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in FIN_WAIT_1 state"""
        if tcp_header.ack:
            self._process_ack(conn, tcp_header.ack_num)
            if not conn.unacked_segments:  # Our FIN was ACKed
                conn.state = TCPState.FIN_WAIT_2

        if tcp_header.fin:
            conn.local_ack += 1

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

            if conn.state == TCPState.FIN_WAIT_2:
                conn.state = TCPState.TIME_WAIT
                conn.time_wait_start = time.time()
            else:
                conn.state = TCPState.CLOSING

        return True

    def _handle_fin_wait_2(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in FIN_WAIT_2 state"""
        if tcp_header.fin:
            conn.local_ack += 1

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

            conn.state = TCPState.TIME_WAIT
            conn.time_wait_start = time.time()

        return True

    def _handle_close_wait(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in CLOSE_WAIT state"""
        # Application should close the connection
        return True

    def _handle_closing(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in CLOSING state"""
        if tcp_header.ack:
            self._process_ack(conn, tcp_header.ack_num)
            if not conn.unacked_segments:  # Our FIN was ACKed
                conn.state = TCPState.TIME_WAIT
                conn.time_wait_start = time.time()

        return True

    def _handle_last_ack(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in LAST_ACK state"""
        if tcp_header.ack:
            self._process_ack(conn, tcp_header.ack_num)
            if not conn.unacked_segments:  # Our FIN was ACKed
                conn.state = TCPState.CLOSED
                self._close_connection(conn)

        return True

    def _handle_time_wait(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in TIME_WAIT state"""
        # Just acknowledge any segments
        if tcp_header.seq_num == conn.local_ack:
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

        return True

    def _process_ack(self, conn: TCPConnection, ack_num: int):
        """Process ACK and remove acknowledged segments"""
        acked_segments = []
        acked_bytes = 0

        for seq_num, segment in list(conn.unacked_segments.items()):
            if seq_num < ack_num:
                acked_segments.append((seq_num, segment))
                acked_bytes += segment.data_length
                del conn.unacked_segments[seq_num]

        # Update RTT and congestion window
        if acked_segments:
            # Use first acked segment for RTT calculation
            rtt = time.time() - acked_segments[0][1].timestamp
            self._update_rto(conn, rtt)
            self._update_congestion_window(conn, acked_bytes)

            # Try to send more data
            self._try_send_data(conn)

    def _send_rst(self, ip_header: IPv4Header, tcp_header: TCPHeader):
        """Send RST for unknown connection"""
        # Create RST response
        rst_ip = IPv4Header(
            protocol=6,
            source_ip=ip_header.dest_ip,
            dest_ip=ip_header.source_ip,
            ttl=64
        )

        rst_tcp = TCPHeader(
            source_port=tcp_header.dest_port,
            dest_port=tcp_header.source_port,
            seq_num=tcp_header.ack_num if tcp_header.ack else 0,
            ack_num=tcp_header.seq_num + 1 if tcp_header.syn else tcp_header.seq_num,
            flags=0x14 if tcp_header.ack else 0x04  # RST+ACK or RST
        )

        packet = IPParser.build_packet(rst_ip, rst_tcp)
        self._send_packet(packet)

    def _timer_loop(self):
        """Timer loop for handling timeouts"""
        while self.running:
            current_time = time.time()

            with self.lock:
                connections_to_check = list(self.connections.values())

            for conn in connections_to_check:
                # Handle retransmissions
                if conn.unacked_segments:
                    self._handle_retransmission(conn)

                # Handle connection timeout
                if current_time - conn.last_activity > self.connection_timeout:
                    self._close_connection(conn, reset=True)

                # Handle TIME_WAIT timeout
                if (conn.state == TCPState.TIME_WAIT and
                        conn.time_wait_start and
                        current_time - conn.time_wait_start > self.time_wait_timeout):
                    conn.state = TCPState.CLOSED
                    self._close_connection(conn)

            time.sleep(1)  # Check every second

    def start(self):
        """Start TCP engine"""
        self.running = True
        self.timer_thread = threading.Thread(target=self._timer_loop, daemon=True)
        self.timer_thread.start()
        print("TCP engine started")

    def stop(self):
        """Stop TCP engine"""
        self.running = False
        if self.timer_thread:
            self.timer_thread.join()

        # Close all connections
        with self.lock:
            for conn in list(self.connections.values()):
                self._close_connection(conn, reset=True)

        print("TCP engine stopped")

    def get_connections(self) -> Dict[str, Dict]:
        """Get current connections"""
        with self.lock:
            return {
                conn_id: {
                    'local_ip': conn.local_ip,
                    'local_port': conn.local_port,
                    'remote_ip': conn.remote_ip,
                    'remote_port': conn.remote_port,
                    'state': conn.state.value,
                    'local_seq': conn.local_seq,
                    'local_ack': conn.local_ack,
                    'remote_seq': conn.remote_seq,
                    'remote_ack': conn.remote_ack,
                    'window_size': conn.local_window,
                    'cwnd': conn.cwnd,
                    'unacked_segments': len(conn.unacked_segments),
                    'last_activity': conn.last_activity
                }
                for conn_id, conn in self.connections.items()
            }
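For orientation, a minimal sketch of how this engine is meant to be driven; the transmit hook follows the _send_packet docstring ("to be implemented by integration layer"), and bridge_send/on_accept are illustrative names, not part of the diff:

    engine = TCPEngine({'mss': 1460, 'initial_window': 65535, 'timeout': 300})
    engine._send_packet = bridge_send   # hypothetical: the integration layer supplies the real transmit path

    def on_accept(conn):
        # called for each inbound connection accepted on a listening port
        conn.on_data_received = lambda data: print(f"{len(data)} bytes from {conn.remote_ip}")

    engine.listen(8080, on_accept)
    engine.start()
    # raw IPv4/TCP packets from the bridge are then fed in with engine.process_packet(raw_bytes)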
core/traffic_router.py
ADDED
@@ -0,0 +1,455 @@
"""
Traffic Router Module

Handles routing of all client traffic through VPN for free data access
"""

import os
import json
import subprocess
import threading
import time
import logging
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, asdict
import ipaddress

logger = logging.getLogger(__name__)

@dataclass
class RoutingRule:
    """Represents a traffic routing rule"""
    rule_id: str
    source_network: str
    destination_network: str
    gateway: str
    interface: str
    priority: int
    enabled: bool = True
    created_at: float = None

    def __post_init__(self):
        if self.created_at is None:
            self.created_at = time.time()

@dataclass
class ClientRoute:
    """Represents a client's routing configuration"""
    client_id: str
    client_ip: str
    vpn_gateway: str
    original_gateway: str
    routes_applied: List[str]
    status: str = "active"
    created_at: float = None

    def __post_init__(self):
        if self.created_at is None:
            self.created_at = time.time()

class TrafficRouter:
    """Manages traffic routing for VPN clients to enable free data access"""

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self.routing_rules: Dict[str, RoutingRule] = {}
        self.client_routes: Dict[str, ClientRoute] = {}
        self.vpn_network = ipaddress.IPv4Network("10.8.0.0/24")
        self.vpn_gateway = "10.8.0.1"
        self.is_running = False

        # Integration with other components
        self.nat_engine = None
        self.firewall = None
        self.dhcp_server = None

        # Default routing rules for free data access
        self.default_rules = [
            {
                "rule_id": "default_vpn_route",
                "source_network": "0.0.0.0/0",
                "destination_network": "0.0.0.0/0",
                "gateway": self.vpn_gateway,
                "interface": "tun0",
                "priority": 100
            },
            {
                "rule_id": "local_network_direct",
                "source_network": "192.168.0.0/16",
                "destination_network": "192.168.0.0/16",
                "gateway": "direct",
                "interface": "eth0",
                "priority": 50
            },
            {
                "rule_id": "vpn_network_direct",
                "source_network": "10.8.0.0/24",
                "destination_network": "10.8.0.0/24",
                "gateway": "direct",
                "interface": "tun0",
                "priority": 50
            }
        ]

        logger.info("Traffic Router initialized")

    def set_components(self, nat_engine=None, firewall=None, dhcp_server=None):
        """Set references to other ISP stack components"""
        self.nat_engine = nat_engine
        self.firewall = firewall
        self.dhcp_server = dhcp_server
        logger.info("Traffic Router components set")

    def start(self):
        """Start the traffic router"""
        try:
            if self.is_running:
                logger.warning("Traffic Router is already running")
                return True

            # Initialize default routing rules
            self._initialize_default_rules()

            # Setup VPN routing infrastructure
            self._setup_vpn_routing()

            self.is_running = True
            logger.info("Traffic Router started successfully")
            return True

        except Exception as e:
            logger.error(f"Error starting Traffic Router: {e}")
            return False

    def stop(self):
        """Stop the traffic router"""
        try:
            if not self.is_running:
                logger.warning("Traffic Router is not running")
                return True

            # Clean up all client routes
            for client_id in list(self.client_routes.keys()):
                self.remove_client_route(client_id)

            # Clean up routing rules
            self._cleanup_routing_rules()

            self.is_running = False
            logger.info("Traffic Router stopped")
            return True

        except Exception as e:
            logger.error(f"Error stopping Traffic Router: {e}")
            return False

    def _initialize_default_rules(self):
        """Initialize default routing rules"""
        try:
            for rule_config in self.default_rules:
                rule = RoutingRule(**rule_config)
                self.routing_rules[rule.rule_id] = rule
                logger.debug(f"Initialized routing rule: {rule.rule_id}")

            logger.info(f"Initialized {len(self.default_rules)} default routing rules")

        except Exception as e:
            logger.error(f"Error initializing default rules: {e}")

    def _setup_vpn_routing(self):
        """Setup VPN routing infrastructure"""
        try:
            # Enable IP forwarding
            self._enable_ip_forwarding()

            # Setup iptables rules for VPN traffic
            self._setup_iptables_rules()

            # Configure routing tables
            self._configure_routing_tables()

            logger.info("VPN routing infrastructure setup completed")

        except Exception as e:
            logger.error(f"Error setting up VPN routing: {e}")

    def _enable_ip_forwarding(self):
        """Enable IP forwarding on the system"""
        try:
            # Enable IPv4 forwarding
            subprocess.run([
                "/usr/bin/sudo", "sysctl", "-w", "net.ipv4.ip_forward=1"
            ], check=True, capture_output=True)

            # Make it persistent
            with open("/tmp/99-ip-forward.conf", "w") as f:
                f.write("net.ipv4.ip_forward=1\n")

            subprocess.run([
                "/usr/bin/sudo", "cp", "/tmp/99-ip-forward.conf", "/etc/sysctl.d/"
            ], check=True, capture_output=True)

            logger.info("IP forwarding enabled")

        except subprocess.CalledProcessError as e:
            logger.warning(f"Could not enable IP forwarding: {e}")
        except Exception as e:
            logger.error(f"Error enabling IP forwarding: {e}")

    def _setup_iptables_rules(self):
        """Setup iptables rules for VPN traffic routing"""
        try:
            # Note: In a production environment, these would be actual iptables commands
            # For this implementation, we'll log the commands that would be executed

            iptables_rules = [
                # Allow VPN traffic
                "iptables -A INPUT -i tun0 -j ACCEPT",
                "iptables -A FORWARD -i tun0 -j ACCEPT",
                "iptables -A OUTPUT -o tun0 -j ACCEPT",

                # NAT rules for VPN clients
                "iptables -t nat -A POSTROUTING -s 10.8.0.0/24 -o eth0 -j MASQUERADE",
                "iptables -t nat -A POSTROUTING -s 10.8.0.0/24 -o tun0 -j MASQUERADE",

                # Forward VPN traffic
                "iptables -A FORWARD -s 10.8.0.0/24 -j ACCEPT",
                "iptables -A FORWARD -d 10.8.0.0/24 -j ACCEPT",

                # Mark VPN traffic for special routing
                "iptables -t mangle -A PREROUTING -s 10.8.0.0/24 -j MARK --set-mark 100",
                "iptables -t mangle -A OUTPUT -s 10.8.0.0/24 -j MARK --set-mark 100"
            ]

            for rule in iptables_rules:
                logger.debug(f"Would execute: {rule}")
                # In a real implementation, you would execute these with subprocess
                # subprocess.run(rule.split(), check=True, capture_output=True)

            logger.info("Iptables rules configured for VPN routing")

        except Exception as e:
            logger.error(f"Error setting up iptables rules: {e}")

    def _configure_routing_tables(self):
        """Configure custom routing tables for VPN traffic"""
        try:
            # Create custom routing table for VPN traffic
            routing_commands = [
                # Add custom routing table
                "echo '100 vpn_table' >> /etc/iproute2/rt_tables",

                # Add routes to custom table
                "ip route add default via 10.8.0.1 dev tun0 table vpn_table",
                "ip route add 10.8.0.0/24 dev tun0 table vpn_table",

                # Add routing rules
                "ip rule add fwmark 100 table vpn_table",
                "ip rule add from 10.8.0.0/24 table vpn_table"
            ]

            for cmd in routing_commands:
                logger.debug(f"Would execute: {cmd}")
                # In a real implementation, you would execute these with subprocess
                # subprocess.run(cmd.split(), check=True, capture_output=True)

            logger.info("Custom routing tables configured")

        except Exception as e:
            logger.error(f"Error configuring routing tables: {e}")

    def add_client_route(self, client_id: str, client_ip: str, original_gateway: str = None) -> bool:
        """Add routing configuration for a VPN client"""
        try:
            if client_id in self.client_routes:
                logger.warning(f"Client route already exists for {client_id}")
                return True

            # Create client route configuration
            client_route = ClientRoute(
                client_id=client_id,
                client_ip=client_ip,
                vpn_gateway=self.vpn_gateway,
                original_gateway=original_gateway or "auto",
                routes_applied=[]
            )

            # Apply routing rules for this client
            routes_applied = self._apply_client_routing_rules(client_route)
            client_route.routes_applied = routes_applied

            # Store client route
            self.client_routes[client_id] = client_route

            # Integrate with NAT engine if available
            if self.nat_engine:
                self._configure_nat_for_client(client_route)

            logger.info(f"Added routing configuration for client {client_id} ({client_ip})")
            return True

        except Exception as e:
            logger.error(f"Error adding client route for {client_id}: {e}")
            return False

    def remove_client_route(self, client_id: str) -> bool:
        """Remove routing configuration for a VPN client"""
        try:
            if client_id not in self.client_routes:
                logger.warning(f"No route configuration found for client {client_id}")
                return True

            client_route = self.client_routes[client_id]

            # Remove applied routing rules
            self._remove_client_routing_rules(client_route)

            # Remove NAT configuration if available
            if self.nat_engine:
                self._remove_nat_for_client(client_route)

            # Remove from storage
            del self.client_routes[client_id]

            logger.info(f"Removed routing configuration for client {client_id}")
            return True

        except Exception as e:
            logger.error(f"Error removing client route for {client_id}: {e}")
            return False

    def _apply_client_routing_rules(self, client_route: ClientRoute) -> List[str]:
        """Apply routing rules for a specific client"""
        applied_routes = []

        try:
            # Route all client traffic through VPN
            route_commands = [
                f"ip route add default via {client_route.vpn_gateway} dev tun0 src {client_route.client_ip}",
                f"ip route add {client_route.client_ip}/32 dev tun0",
                f"iptables -t nat -A POSTROUTING -s {client_route.client_ip} -j MASQUERADE"
            ]

            for cmd in route_commands:
                logger.debug(f"Would execute for client {client_route.client_id}: {cmd}")
                applied_routes.append(cmd)
                # In a real implementation, you would execute these with subprocess

            logger.debug(f"Applied {len(applied_routes)} routing rules for client {client_route.client_id}")

        except Exception as e:
            logger.error(f"Error applying routing rules for client {client_route.client_id}: {e}")

        return applied_routes

    def _remove_client_routing_rules(self, client_route: ClientRoute):
        """Remove routing rules for a specific client"""
        try:
            # Remove the applied routes (reverse of apply)
            for route_cmd in client_route.routes_applied:
                # Convert add commands to delete commands
                delete_cmd = route_cmd.replace(" add ", " del ").replace(" -A ", " -D ")
                logger.debug(f"Would execute for cleanup: {delete_cmd}")
                # In a real implementation, you would execute these with subprocess

            logger.debug(f"Removed routing rules for client {client_route.client_id}")

        except Exception as e:
            logger.error(f"Error removing routing rules for client {client_route.client_id}: {e}")

    def _configure_nat_for_client(self, client_route: ClientRoute):
        """Configure NAT for VPN client"""
        try:
            if not self.nat_engine:
                return

            # Register client with NAT engine for special handling
            nat_config = {
                "client_id": client_route.client_id,
                "client_ip": client_route.client_ip,
                "vpn_gateway": client_route.vpn_gateway,
                "routing_mode": "vpn_tunnel"
            }

            # This would integrate with the existing NAT engine
            logger.debug(f"Would configure NAT for VPN client: {nat_config}")

        except Exception as e:
            logger.error(f"Error configuring NAT for client {client_route.client_id}: {e}")

    def _remove_nat_for_client(self, client_route: ClientRoute):
        """Remove NAT configuration for VPN client"""
        try:
            if not self.nat_engine:
                return

            # Remove client from NAT engine
            logger.debug(f"Would remove NAT configuration for client {client_route.client_id}")

        except Exception as e:
            logger.error(f"Error removing NAT for client {client_route.client_id}: {e}")

    def _cleanup_routing_rules(self):
        """Clean up all routing rules"""
        try:
            # Remove custom routing table entries
            cleanup_commands = [
                "ip rule del fwmark 100 table vpn_table",
                "ip rule del from 10.8.0.0/24 table vpn_table",
                "ip route flush table vpn_table"
            ]

            for cmd in cleanup_commands:
                logger.debug(f"Would execute for cleanup: {cmd}")
                # In a real implementation, you would execute these with subprocess

            logger.info("Routing rules cleaned up")

        except Exception as e:
            logger.error(f"Error cleaning up routing rules: {e}")

    def get_client_routes(self) -> List[Dict[str, Any]]:
        """Get list of all client routes"""
        return [asdict(route) for route in self.client_routes.values()]

    def get_routing_rules(self) -> List[Dict[str, Any]]:
        """Get list of all routing rules"""
        return [asdict(rule) for rule in self.routing_rules.values()]

    def get_stats(self) -> Dict[str, Any]:
        """Get traffic router statistics"""
        return {
            "is_running": self.is_running,
            "total_clients": len(self.client_routes),
            "active_clients": len([r for r in self.client_routes.values() if r.status == "active"]),
            "total_rules": len(self.routing_rules),
            "enabled_rules": len([r for r in self.routing_rules.values() if r.enabled]),
            "vpn_network": str(self.vpn_network),
            "vpn_gateway": self.vpn_gateway
        }

    def update_client_status(self, client_id: str, status: str) -> bool:
        """Update the status of a client route"""
        try:
            if client_id not in self.client_routes:
                return False

            self.client_routes[client_id].status = status
            logger.debug(f"Updated client {client_id} status to {status}")
            return True

        except Exception as e:
            logger.error(f"Error updating client status: {e}")
            return False

    def is_client_routed(self, client_id: str) -> bool:
        """Check if a client is currently routed through VPN"""
        return (client_id in self.client_routes and
                self.client_routes[client_id].status == "active")

    def get_client_route_info(self, client_id: str) -> Optional[Dict[str, Any]]:
        """Get routing information for a specific client"""
        if client_id in self.client_routes:
            return asdict(self.client_routes[client_id])
        return None
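A minimal usage sketch for TrafficRouter; as the comments above note, the class currently only logs the ip/iptables commands it would run, so this sequence is safe to exercise (the empty config and the client id/IP are assumptions for illustration):

    router = TrafficRouter(config={})
    router.set_components(nat_engine=None, firewall=None, dhcp_server=None)
    router.start()
    router.add_client_route("client-1", "10.8.0.10")   # illustrative client id and VPN-assigned IP
    print(router.get_stats())
    router.remove_client_route("client-1")
    router.stop()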
core/virtual_router.py
ADDED
@@ -0,0 +1,565 @@
"""
Virtual Router Module

Implements packet routing between virtual clients and external internet:
- Maintain routing table for virtual network
- Forward packets based on destination IP
- Handle internal vs external routing decisions
- Support static route configuration
"""

import ipaddress
import time
import threading
from typing import Dict, List, Optional, Tuple, Set
from dataclasses import dataclass
from enum import Enum

from .ip_parser import ParsedPacket, IPv4Header


class RouteType(Enum):
    DIRECT = "DIRECT"    # Directly connected network
    STATIC = "STATIC"    # Static route
    DEFAULT = "DEFAULT"  # Default route


@dataclass
class RouteEntry:
    """Represents a routing table entry"""
    destination: str        # Network in CIDR notation (e.g., "10.0.0.0/24")
    gateway: Optional[str]  # Next hop IP (None for direct routes)
    interface: str          # Interface name or identifier
    metric: int             # Route metric (lower is preferred)
    route_type: RouteType
    created_time: float
    last_used: Optional[float] = None
    use_count: int = 0

    def __post_init__(self):
        if self.created_time == 0:
            self.created_time = time.time()

    def record_use(self):
        """Record route usage"""
        self.use_count += 1
        self.last_used = time.time()

    def matches_destination(self, ip: str) -> bool:
        """Check if this route matches the destination IP"""
        try:
            network = ipaddress.ip_network(self.destination, strict=False)
            return ipaddress.ip_address(ip) in network
        except (ipaddress.AddressValueError, ValueError):
            return False

    def to_dict(self) -> Dict:
        """Convert route to dictionary"""
        return {
            'destination': self.destination,
            'gateway': self.gateway,
            'interface': self.interface,
            'metric': self.metric,
            'route_type': self.route_type.value,
            'created_time': self.created_time,
            'last_used': self.last_used,
            'use_count': self.use_count
        }


@dataclass
class Interface:
    """Represents a network interface"""
    name: str
    ip_address: str
    netmask: str
    network: str  # Network in CIDR notation
    enabled: bool = True
    mtu: int = 1500
    created_time: float = 0

    def __post_init__(self):
        if self.created_time == 0:
            self.created_time = time.time()

        # Calculate network if not provided
        if not self.network:
            try:
                interface_network = ipaddress.ip_interface(f"{self.ip_address}/{self.netmask}")
                self.network = str(interface_network.network)
            except (ipaddress.AddressValueError, ValueError):
                self.network = "0.0.0.0/0"

    def is_local_address(self, ip: str) -> bool:
        """Check if IP address belongs to this interface's network"""
        try:
            network = ipaddress.ip_network(self.network, strict=False)
            return ipaddress.ip_address(ip) in network
        except (ipaddress.AddressValueError, ValueError):
            return False

    def to_dict(self) -> Dict:
        """Convert interface to dictionary"""
        return {
            'name': self.name,
            'ip_address': self.ip_address,
            'netmask': self.netmask,
            'network': self.network,
            'enabled': self.enabled,
            'mtu': self.mtu,
            'created_time': self.created_time
        }


class VirtualRouter:
    """Virtual router implementation"""

    def __init__(self, config: Dict):
        self.config = config
        self.routing_table: List[RouteEntry] = []
        self.interfaces: Dict[str, Interface] = {}
        self.arp_table: Dict[str, str] = {}  # IP -> MAC mapping
        self.lock = threading.Lock()

        # Router configuration
        self.router_id = config.get('router_id', 'virtual-router-1')
        self.default_gateway = config.get('default_gateway')

        # Statistics
        self.stats = {
            'packets_routed': 0,
            'packets_dropped': 0,
            'route_lookups': 0,
            'arp_requests': 0,
            'arp_replies': 0,
            'routing_errors': 0
        }

        # Initialize interfaces and routes
        self._initialize_interfaces()
        self._initialize_routes()

    def _initialize_interfaces(self):
        """Initialize network interfaces from configuration"""
        interfaces_config = self.config.get('interfaces', [])

        for iface_config in interfaces_config:
            interface = Interface(
                name=iface_config['name'],
                ip_address=iface_config['ip_address'],
                netmask=iface_config.get('netmask', '255.255.255.0'),
                network=iface_config.get('network'),
                enabled=iface_config.get('enabled', True),
                mtu=iface_config.get('mtu', 1500)
            )

            with self.lock:
                self.interfaces[interface.name] = interface

            # Add direct route for interface network
            self.add_route(
                destination=interface.network,
                gateway=None,
                interface=interface.name,
                metric=0,
                route_type=RouteType.DIRECT
            )

    def _initialize_routes(self):
        """Initialize static routes from configuration"""
        routes_config = self.config.get('static_routes', [])

        for route_config in routes_config:
            self.add_route(
                destination=route_config['destination'],
                gateway=route_config.get('gateway'),
                interface=route_config['interface'],
                metric=route_config.get('metric', 10),
                route_type=RouteType.STATIC
            )

        # Add default route if configured
        if self.default_gateway:
            # Find interface for default gateway
            default_interface = None
            for interface in self.interfaces.values():
                if interface.is_local_address(self.default_gateway):
                    default_interface = interface.name
                    break

            if default_interface:
                self.add_route(
                    destination="0.0.0.0/0",
                    gateway=self.default_gateway,
                    interface=default_interface,
                    metric=100,
                    route_type=RouteType.DEFAULT
                )

    def add_interface(self, name: str, ip_address: str, netmask: str = "255.255.255.0",
                      network: Optional[str] = None, mtu: int = 1500) -> bool:
        """Add network interface"""
        with self.lock:
            if name in self.interfaces:
                return False

            interface = Interface(
                name=name,
                ip_address=ip_address,
                netmask=netmask,
                network=network,
                mtu=mtu
            )

            self.interfaces[name] = interface

        # Add direct route for interface network
        self.add_route(
            destination=interface.network,
            gateway=None,
            interface=name,
            metric=0,
            route_type=RouteType.DIRECT
        )

        return True

    def remove_interface(self, name: str) -> bool:
        """Remove network interface"""
        with self.lock:
            if name not in self.interfaces:
                return False

            # Remove interface
            del self.interfaces[name]

            # Remove routes associated with this interface
            self.routing_table = [
                route for route in self.routing_table
                if route.interface != name
            ]

            return True

    def enable_interface(self, name: str) -> bool:
        """Enable network interface"""
        with self.lock:
            if name in self.interfaces:
                self.interfaces[name].enabled = True
                return True
            return False

    def disable_interface(self, name: str) -> bool:
        """Disable network interface"""
        with self.lock:
            if name in self.interfaces:
                self.interfaces[name].enabled = False
                return True
            return False

    def add_route(self, destination: str, gateway: Optional[str], interface: str,
                  metric: int = 10, route_type: RouteType = RouteType.STATIC) -> bool:
        """Add route to routing table"""
        try:
            # Validate destination network
            ipaddress.ip_network(destination, strict=False)

            # Validate gateway if provided
            if gateway:
                ipaddress.ip_address(gateway)

            route = RouteEntry(
                destination=destination,
                gateway=gateway,
                interface=interface,
                metric=metric,
                route_type=route_type,
                created_time=time.time()
            )

            with self.lock:
                # Check if interface exists
                if interface not in self.interfaces:
                    return False

                # Remove existing route with same destination and interface
                self.routing_table = [
                    r for r in self.routing_table
                    if not (r.destination == destination and r.interface == interface)
                ]

                # Add new route
                self.routing_table.append(route)

                # Sort by metric (lower metric = higher priority)
                self.routing_table.sort(key=lambda r: (r.metric, r.created_time))

            return True

        except (ipaddress.AddressValueError, ValueError):
            return False

    def remove_route(self, destination: str, interface: str) -> bool:
        """Remove route from routing table"""
        with self.lock:
            original_count = len(self.routing_table)
            self.routing_table = [
                route for route in self.routing_table
                if not (route.destination == destination and route.interface == interface)
            ]
            return len(self.routing_table) < original_count

    def lookup_route(self, destination_ip: str) -> Optional[RouteEntry]:
        """Look up route for destination IP"""
        self.stats['route_lookups'] += 1

        with self.lock:
            # Find all matching routes
            matching_routes = []
            for route in self.routing_table:
                # Skip disabled interfaces
                interface = self.interfaces.get(route.interface)
                if not interface or not interface.enabled:
                    continue

                if route.matches_destination(destination_ip):
                    matching_routes.append(route)

            if not matching_routes:
                self.stats['routing_errors'] += 1
                return None

            # Sort by specificity (longest prefix match) and then by metric
            def route_priority(route):
                try:
                    network = ipaddress.ip_network(route.destination, strict=False)
                    return (-network.prefixlen, route.metric, route.created_time)
                except:
                    return (0, route.metric, route.created_time)

            matching_routes.sort(key=route_priority)
            best_route = matching_routes[0]
            best_route.record_use()

            return best_route

    def route_packet(self, packet: ParsedPacket) -> Optional[Tuple[str, str]]:
        """Route packet and return (next_hop_ip, interface)"""
        self.stats['packets_routed'] += 1

        destination_ip = packet.ip_header.dest_ip

        # Look up route
        route = self.lookup_route(destination_ip)
        if not route:
            self.stats['packets_dropped'] += 1
            return None
+
|
358 |
+
# Determine next hop
|
359 |
+
if route.gateway:
|
360 |
+
next_hop = route.gateway
|
361 |
+
else:
|
362 |
+
# Direct route - destination is next hop
|
363 |
+
next_hop = destination_ip
|
364 |
+
|
365 |
+
return (next_hop, route.interface)
|
366 |
+
|
367 |
+
def is_local_destination(self, ip: str) -> bool:
|
368 |
+
"""Check if IP is a local destination (belongs to router interfaces)"""
|
369 |
+
with self.lock:
|
370 |
+
for interface in self.interfaces.values():
|
371 |
+
if interface.ip_address == ip:
|
372 |
+
return True
|
373 |
+
return False
|
374 |
+
|
375 |
+
def is_local_network(self, ip: str) -> bool:
|
376 |
+
"""Check if IP belongs to any local network"""
|
377 |
+
with self.lock:
|
378 |
+
for interface in self.interfaces.values():
|
379 |
+
if interface.is_local_address(ip):
|
380 |
+
return True
|
381 |
+
return False
|
382 |
+
|
383 |
+
def get_interface_for_ip(self, ip: str) -> Optional[Interface]:
|
384 |
+
"""Get interface that can reach the given IP"""
|
385 |
+
with self.lock:
|
386 |
+
for interface in self.interfaces.values():
|
387 |
+
if interface.enabled and interface.is_local_address(ip):
|
388 |
+
return interface
|
389 |
+
return None
|
390 |
+
|
391 |
+
def add_arp_entry(self, ip: str, mac: str):
|
392 |
+
"""Add ARP table entry"""
|
393 |
+
with self.lock:
|
394 |
+
self.arp_table[ip] = mac
|
395 |
+
|
396 |
+
def get_arp_entry(self, ip: str) -> Optional[str]:
|
397 |
+
"""Get MAC address from ARP table"""
|
398 |
+
with self.lock:
|
399 |
+
return self.arp_table.get(ip)
|
400 |
+
|
401 |
+
def remove_arp_entry(self, ip: str) -> bool:
|
402 |
+
"""Remove ARP table entry"""
|
403 |
+
with self.lock:
|
404 |
+
if ip in self.arp_table:
|
405 |
+
del self.arp_table[ip]
|
406 |
+
return True
|
407 |
+
return False
|
408 |
+
|
409 |
+
def clear_arp_table(self):
|
410 |
+
"""Clear ARP table"""
|
411 |
+
with self.lock:
|
412 |
+
self.arp_table.clear()
|
413 |
+
|
414 |
+
def get_routing_table(self) -> List[Dict]:
|
415 |
+
"""Get routing table"""
|
416 |
+
with self.lock:
|
417 |
+
return [route.to_dict() for route in self.routing_table]
|
418 |
+
|
419 |
+
def get_interfaces(self) -> Dict[str, Dict]:
|
420 |
+
"""Get network interfaces"""
|
421 |
+
with self.lock:
|
422 |
+
return {
|
423 |
+
name: interface.to_dict()
|
424 |
+
for name, interface in self.interfaces.items()
|
425 |
+
}
|
426 |
+
|
427 |
+
def get_arp_table(self) -> Dict[str, str]:
|
428 |
+
"""Get ARP table"""
|
429 |
+
with self.lock:
|
430 |
+
return self.arp_table.copy()
|
431 |
+
|
432 |
+
def get_stats(self) -> Dict:
|
433 |
+
"""Get router statistics"""
|
434 |
+
with self.lock:
|
435 |
+
stats = self.stats.copy()
|
436 |
+
stats['total_routes'] = len(self.routing_table)
|
437 |
+
stats['total_interfaces'] = len(self.interfaces)
|
438 |
+
stats['enabled_interfaces'] = sum(1 for iface in self.interfaces.values() if iface.enabled)
|
439 |
+
stats['arp_entries'] = len(self.arp_table)
|
440 |
+
|
441 |
+
return stats
|
442 |
+
|
443 |
+
def reset_stats(self):
|
444 |
+
"""Reset router statistics"""
|
445 |
+
self.stats = {
|
446 |
+
'packets_routed': 0,
|
447 |
+
'packets_dropped': 0,
|
448 |
+
'route_lookups': 0,
|
449 |
+
'arp_requests': 0,
|
450 |
+
'arp_replies': 0,
|
451 |
+
'routing_errors': 0
|
452 |
+
}
|
453 |
+
|
454 |
+
# Reset route usage statistics
|
455 |
+
with self.lock:
|
456 |
+
for route in self.routing_table:
|
457 |
+
route.use_count = 0
|
458 |
+
route.last_used = None
|
459 |
+
|
460 |
+
def flush_routes(self, route_type: Optional[RouteType] = None):
|
461 |
+
"""Flush routes of specified type (or all if None)"""
|
462 |
+
with self.lock:
|
463 |
+
if route_type:
|
464 |
+
self.routing_table = [
|
465 |
+
route for route in self.routing_table
|
466 |
+
if route.route_type != route_type
|
467 |
+
]
|
468 |
+
else:
|
469 |
+
self.routing_table.clear()
|
470 |
+
|
471 |
+
def export_config(self) -> Dict:
|
472 |
+
"""Export router configuration"""
|
473 |
+
return {
|
474 |
+
'router_id': self.router_id,
|
475 |
+
'default_gateway': self.default_gateway,
|
476 |
+
'interfaces': [
|
477 |
+
{
|
478 |
+
'name': iface.name,
|
479 |
+
'ip_address': iface.ip_address,
|
480 |
+
'netmask': iface.netmask,
|
481 |
+
'network': iface.network,
|
482 |
+
'enabled': iface.enabled,
|
483 |
+
'mtu': iface.mtu
|
484 |
+
}
|
485 |
+
for iface in self.interfaces.values()
|
486 |
+
],
|
487 |
+
'static_routes': [
|
488 |
+
{
|
489 |
+
'destination': route.destination,
|
490 |
+
'gateway': route.gateway,
|
491 |
+
'interface': route.interface,
|
492 |
+
'metric': route.metric
|
493 |
+
}
|
494 |
+
for route in self.routing_table
|
495 |
+
if route.route_type == RouteType.STATIC
|
496 |
+
]
|
497 |
+
}
|
498 |
+
|
499 |
+
def import_config(self, config: Dict):
|
500 |
+
"""Import router configuration"""
|
501 |
+
# Clear existing configuration
|
502 |
+
with self.lock:
|
503 |
+
self.interfaces.clear()
|
504 |
+
self.routing_table.clear()
|
505 |
+
self.arp_table.clear()
|
506 |
+
|
507 |
+
# Update router settings
|
508 |
+
self.router_id = config.get('router_id', self.router_id)
|
509 |
+
self.default_gateway = config.get('default_gateway', self.default_gateway)
|
510 |
+
|
511 |
+
# Reinitialize from new config
|
512 |
+
self.config.update(config)
|
513 |
+
self._initialize_interfaces()
|
514 |
+
self._initialize_routes()
|
515 |
+
|
516 |
+
|
517 |
+
class RouterUtils:
|
518 |
+
"""Utility functions for router operations"""
|
519 |
+
|
520 |
+
@staticmethod
|
521 |
+
def ip_to_int(ip: str) -> int:
|
522 |
+
"""Convert IP address to integer"""
|
523 |
+
return int(ipaddress.ip_address(ip))
|
524 |
+
|
525 |
+
@staticmethod
|
526 |
+
def int_to_ip(ip_int: int) -> str:
|
527 |
+
"""Convert integer to IP address"""
|
528 |
+
return str(ipaddress.ip_address(ip_int))
|
529 |
+
|
530 |
+
@staticmethod
|
531 |
+
def calculate_network(ip: str, netmask: str) -> str:
|
532 |
+
"""Calculate network address from IP and netmask"""
|
533 |
+
try:
|
534 |
+
interface = ipaddress.ip_interface(f"{ip}/{netmask}")
|
535 |
+
return str(interface.network)
|
536 |
+
except (ipaddress.AddressValueError, ValueError):
|
537 |
+
return "0.0.0.0/0"
|
538 |
+
|
539 |
+
@staticmethod
|
540 |
+
def is_private_ip(ip: str) -> bool:
|
541 |
+
"""Check if IP address is private"""
|
542 |
+
try:
|
543 |
+
ip_obj = ipaddress.ip_address(ip)
|
544 |
+
return ip_obj.is_private
|
545 |
+
except (ipaddress.AddressValueError, ValueError):
|
546 |
+
return False
|
547 |
+
|
548 |
+
@staticmethod
|
549 |
+
def is_multicast_ip(ip: str) -> bool:
|
550 |
+
"""Check if IP address is multicast"""
|
551 |
+
try:
|
552 |
+
ip_obj = ipaddress.ip_address(ip)
|
553 |
+
return ip_obj.is_multicast
|
554 |
+
except (ipaddress.AddressValueError, ValueError):
|
555 |
+
return False
|
556 |
+
|
557 |
+
@staticmethod
|
558 |
+
def validate_cidr(cidr: str) -> bool:
|
559 |
+
"""Validate CIDR notation"""
|
560 |
+
try:
|
561 |
+
ipaddress.ip_network(cidr, strict=False)
|
562 |
+
return True
|
563 |
+
except (ipaddress.AddressValueError, ValueError):
|
564 |
+
return False
|
565 |
+
|
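A minimal sketch of the ordering used by lookup_route above, assuming three candidate routes that all match the destination (the destinations and metrics here are illustrative, not taken from the shipped config). It uses only the standard ipaddress module:

import ipaddress

# Hypothetical candidates: (destination, metric), all assumed to match 10.0.0.42
candidates = [("0.0.0.0/0", 100), ("10.0.0.0/16", 10), ("10.0.0.0/24", 0)]

def route_priority(route):
    destination, metric = route
    # Longest prefix sorts first because prefixlen is negated, then lower metric wins
    return (-ipaddress.ip_network(destination).prefixlen, metric)

print(sorted(candidates, key=route_priority)[0])  # ('10.0.0.0/24', 0)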
database/app.db
ADDED
Binary file (16.4 kB).
main.py
ADDED
@@ -0,0 +1,62 @@
import os
import sys
# DON'T CHANGE THIS !!!
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

from flask import Flask, send_from_directory
from models.user import db
from routes.user import user_bp
from routes.isp_api import init_engines, isp_api

app = Flask(__name__, static_folder=os.path.join(os.path.dirname(__file__), 'static'))
app.config['SECRET_KEY'] = 'asdf#FGSgvasgf$5$WGT'

app.register_blueprint(user_bp, url_prefix='/api')
app.register_blueprint(isp_api, url_prefix='/api')

# uncomment if you need to use database
app.config['SQLALCHEMY_DATABASE_URI'] = f"sqlite:///{os.path.join(os.path.dirname(__file__), 'database', 'app.db')}"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)

with app.app_context():
    db.create_all()

# Default configuration for engines
app.config["dhcp"] = {
    "network": "10.0.0.0/24",
    "range_start": "10.0.0.10",
    "range_end": "10.0.0.100",
    "lease_time": 3600,
    "gateway": "10.0.0.1",
    "dns_servers": ["8.8.8.8", "8.8.4.4"]
}

# Initialize engines only once, when the Flask app is not in debug mode's reloader process
if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
    init_engines(app.config)

@app.route('/')
def serve_root():
    return serve('')

@app.route('/<path:path>')
def serve(path):
    static_folder_path = app.static_folder
    if static_folder_path is None:
        return "Static folder not configured", 404

    if path != "" and os.path.exists(os.path.join(static_folder_path, path)):
        return send_from_directory(static_folder_path, path)
    else:
        index_path = os.path.join(static_folder_path, 'index.html')
        if os.path.exists(index_path):
            return send_from_directory(static_folder_path, 'index.html')
        else:
            return "index.html not found", 404


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=False)
main_isp.py
ADDED
@@ -0,0 +1,273 @@
"""
Main ISP Application

Integrates all core modules and provides the main application entry point
"""

import os
import sys
import json
import threading
import time
from flask import Flask
from flask_cors import CORS

# Add project root to path
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

# Import routes and core modules
from src.routes.isp_api import isp_api, init_engines


def load_config():
    """Load configuration from file or use defaults"""
    config_file = os.path.join(os.path.dirname(__file__), 'config.json')

    default_config = {
        "dhcp": {
            "network": "10.0.0.0/24",
            "range_start": "10.0.0.10",
            "range_end": "10.0.0.100",
            "lease_time": 3600,
            "gateway": "10.0.0.1",
            "dns_servers": ["8.8.8.8", "8.8.4.4"]
        },
        "nat": {
            "port_range_start": 10000,
            "port_range_end": 65535,
            "session_timeout": 300,
            "host_ip": "0.0.0.0"
        },
        "firewall": {
            "default_policy": "ACCEPT",
            "log_blocked": True,
            "log_accepted": False,
            "max_log_entries": 10000,
            "rules": [
                {
                    "rule_id": "allow_dhcp",
                    "priority": 1,
                    "action": "ACCEPT",
                    "direction": "BOTH",
                    "dest_port": "67,68",
                    "protocol": "UDP",
                    "description": "Allow DHCP traffic",
                    "enabled": True
                },
                {
                    "rule_id": "allow_dns",
                    "priority": 2,
                    "action": "ACCEPT",
                    "direction": "BOTH",
                    "dest_port": "53",
                    "protocol": "UDP",
                    "description": "Allow DNS traffic",
                    "enabled": True
                }
            ]
        },
        "tcp": {
            "initial_window": 65535,
            "max_retries": 3,
            "timeout": 300,
            "time_wait_timeout": 120,
            "mss": 1460
        },
        "router": {
            "router_id": "virtual-isp-router",
            "default_gateway": "10.0.0.1",
            "interfaces": [
                {
                    "name": "virtual0",
                    "ip_address": "10.0.0.1",
                    "netmask": "255.255.255.0",
                    "enabled": True,
                    "mtu": 1500
                }
            ],
            "static_routes": []
        },
        "socket_translator": {
            "connect_timeout": 10,
            "read_timeout": 30,
            "max_connections": 1000,
            "buffer_size": 8192
        },
        "packet_bridge": {
            "websocket_host": "0.0.0.0",
            "websocket_port": 8765,
            "tcp_host": "0.0.0.0",
            "tcp_port": 8766,
            "max_clients": 100,
            "client_timeout": 300
        },
        "session_tracker": {
            "max_sessions": 10000,
            "session_timeout": 3600,
            "cleanup_interval": 300,
            "metrics_retention": 86400
        },
        "logger": {
            "log_level": "INFO",
            "log_to_file": True,
            "log_file_path": "/tmp/virtual_isp.log",
            "log_file_max_size": 10485760,
            "log_file_backup_count": 5,
            "log_to_console": True,
            "structured_logging": True,
            "max_memory_logs": 10000
        },
        "openvpn": {
            "server_config_path": "/etc/openvpn/server/server.conf",
            "ca_cert_path": "/home/ubuntu/openvpn-ca/pki/ca.crt",
            "server_cert_path": "/home/ubuntu/openvpn-ca/pki/issued/server.crt",
            "server_key_path": "/home/ubuntu/openvpn-ca/pki/private/server.key",
            "dh_path": "/home/ubuntu/openvpn-ca/pki/dh.pem",
            "vpn_network": "10.8.0.0/24",
            "vpn_server_ip": "10.8.0.1",
            "vpn_port": 1194,
            "protocol": "udp",
            "auto_start": False,
            "client_to_client": False,
            "push_routes": [
                "redirect-gateway def1 bypass-dhcp",
                "dhcp-option DNS 8.8.8.8",
                "dhcp-option DNS 8.8.4.4"
            ]
        }
    }

    if os.path.exists(config_file):
        try:
            with open(config_file, 'r') as f:
                file_config = json.load(f)

            # Merge with defaults
            def merge_config(default, override):
                result = default.copy()
                for key, value in override.items():
                    if key in result and isinstance(result[key], dict) and isinstance(value, dict):
                        result[key] = merge_config(result[key], value)
                    else:
                        result[key] = value
                return result

            return merge_config(default_config, file_config)

        except Exception as e:
            print(f"Error loading config file: {e}")
            print("Using default configuration")
            return default_config
    else:
        # Save default config
        try:
            with open(config_file, 'w') as f:
                json.dump(default_config, f, indent=2)
            print(f"Created default configuration file: {config_file}")
        except Exception as e:
            print(f"Could not save default config: {e}")

        return default_config


def create_app():
    """Create and configure Flask application"""
    app = Flask(__name__, static_folder=os.path.join(os.path.dirname(__file__), 'static'))

    # Enable CORS for all routes
    CORS(app, origins="*", allow_headers=["Content-Type", "Authorization"])

    # Load configuration
    config = load_config()
    app.config['ISP_CONFIG'] = config

    # Register blueprints
    app.register_blueprint(isp_api, url_prefix='/api')

    # Initialize engines
    init_engines(config)

    # Serve static files
    @app.route('/', defaults={'path': ''})
    @app.route('/<path:path>')
    def serve_static(path):
        static_folder_path = app.static_folder
        if static_folder_path is None:
            return "Static folder not configured", 404

        if path != "" and os.path.exists(os.path.join(static_folder_path, path)):
            return app.send_static_file(path)
        else:
            index_path = os.path.join(static_folder_path, 'index.html')
            if os.path.exists(index_path):
                return app.send_static_file('index.html')
            else:
                return """
                <!DOCTYPE html>
                <html>
                <head>
                    <title>Virtual ISP Stack</title>
                    <style>
                        body { font-family: Arial, sans-serif; margin: 40px; }
                        .container { max-width: 800px; margin: 0 auto; }
                        .status { background: #f0f0f0; padding: 20px; border-radius: 5px; }
                        .api-link { color: #0066cc; text-decoration: none; }
                        .api-link:hover { text-decoration: underline; }
                    </style>
                </head>
                <body>
                    <div class="container">
                        <h1>Virtual ISP Stack</h1>
                        <div class="status">
                            <h2>System Status</h2>
                            <p>The Virtual ISP Stack is running successfully!</p>
                            <p><strong>API Endpoint:</strong> <a href="/api/status" class="api-link">/api/status</a></p>
                            <p><strong>System Stats:</strong> <a href="/api/stats" class="api-link">/api/stats</a></p>
                        </div>

                        <h2>Available API Endpoints</h2>
                        <ul>
                            <li><a href="/api/config" class="api-link">GET /api/config</a> - System configuration</li>
                            <li><a href="/api/status" class="api-link">GET /api/status</a> - System status</li>
                            <li><a href="/api/stats" class="api-link">GET /api/stats</a> - System statistics</li>
                            <li><a href="/api/dhcp/leases" class="api-link">GET /api/dhcp/leases</a> - DHCP leases</li>
                            <li><a href="/api/nat/sessions" class="api-link">GET /api/nat/sessions</a> - NAT sessions</li>
                            <li><a href="/api/firewall/rules" class="api-link">GET /api/firewall/rules</a> - Firewall rules</li>
                            <li><a href="/api/tcp/connections" class="api-link">GET /api/tcp/connections</a> - TCP connections</li>
                            <li><a href="/api/router/routes" class="api-link">GET /api/router/routes</a> - Routing table</li>
                            <li><a href="/api/bridge/clients" class="api-link">GET /api/bridge/clients</a> - Bridge clients</li>
                            <li><a href="/api/sessions" class="api-link">GET /api/sessions</a> - Session tracking</li>
                            <li><a href="/api/logs" class="api-link">GET /api/logs</a> - System logs</li>
                        </ul>

                        <h2>WebSocket Bridge</h2>
                        <p>WebSocket server running on port 8765 for packet bridge connections.</p>
                        <p>TCP server running on port 8766 for packet bridge connections.</p>
                    </div>
                </body>
                </html>
                """, 200

    return app


def main():
    """Main application entry point"""
    print("Starting Virtual ISP Stack...")

    # Create Flask app
    app = create_app()

    # Start the application
    print("Virtual ISP Stack started successfully!")
    print("API available at: http://0.0.0.0:5000/api/")
    print("WebSocket bridge at: ws://0.0.0.0:8765")
    print("TCP bridge at: tcp://0.0.0.0:8766")

    # Run Flask app
    app.run(host='0.0.0.0', port=5000, debug=False, threaded=True)


if __name__ == '__main__':
    main()
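The nested merge_config helper inside load_config above performs a recursive dictionary merge, so a partial config.json only overrides the keys it names and inherits the rest of the defaults. A small standalone illustration of that behaviour, with made-up values:

def merge_config(default, override):
    # Same recursive merge as in load_config: nested dicts are merged key by key
    result = default.copy()
    for key, value in override.items():
        if key in result and isinstance(result[key], dict) and isinstance(value, dict):
            result[key] = merge_config(result[key], value)
        else:
            result[key] = value
    return result

defaults = {"dhcp": {"network": "10.0.0.0/24", "lease_time": 3600}}
override = {"dhcp": {"lease_time": 7200}}
print(merge_config(defaults, override))
# {'dhcp': {'network': '10.0.0.0/24', 'lease_time': 7200}}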
models/__pycache__/user.cpython-311.pyc
ADDED
Binary file (1.3 kB).
models/user.py
ADDED
@@ -0,0 +1,18 @@
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)

    def __repr__(self):
        return f'<User {self.username}>'

    def to_dict(self):
        return {
            'id': self.id,
            'username': self.username,
            'email': self.email
        }
openvpn/ca.crt
ADDED
@@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDMzCCAhugAwIBAgIUNO765P4t/yD/PnIFTMVs0Q32TJYwDQYJKoZIhvcNAQEL
BQAwDjEMMAoGA1UEAwwDeWVzMB4XDTI1MDgwMjAxMjkzNVoXDTM1MDczMTAxMjkz
NVowDjEMMAoGA1UEAwwDeWVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAtwhMGXouHnHBRd2RhdrW8sOMgqt4wDXZC0J+4UMjOX6Y7t2O1Sgw/sWhwFPk
QF/cMoQIvsucklPogcnzzGtv9zDkAXyVyCC27UYbg8JfWZK3ZMrt6dfEmYf4KKXm
D6PLn9guxzBB63dhEWx/7fd6H9C/rK/u0rOh15DQRnfEI468cmXS5uNg8ke/73+y
Gzb6q7ZOFByBAwM0hW0lStBaIIcxouFrIK8B72O8H+6t10K1GvgiBhKvM3cc8dpN
y4qvRoN/o+eXarZG7G9dfm9OFgdd9LoXPTTbO+ftFPKOq4F41PnMd2Zcyk7P3GCr
3oK7NbISxZ5efLpy45lgSpqKBwIDAQABo4GIMIGFMB0GA1UdDgQWBBQIi0Er30cV
Qzi+U/LPV4Lf3yvGIzBJBgNVHSMEQjBAgBQIi0Er30cVQzi+U/LPV4Lf3yvGI6ES
pBAwDjEMMAoGA1UEAwwDeWVzghQ07vrk/i3/IP8+cgVMxWzRDfZMljAMBgNVHRME
BTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAHzfSFbi1G7WC
vMSOqSv4/jlBExnz/AlLUBHhgDomIdLK8Pb3tyCD5IYkmi0NT5x6DORcOV2ow1JZ
o4BL7OVV+fhz3VKXEpG+s3gq5j2m+raqLtu6QKBGg7SIUZ4MLjggvAcPjsK+n8sK
86sAUFVTccBxJlKBShAUPSNihyWwxB4PQFvwhefNQSoID1kAB2Fzf1beMX6Gp6Lj
ldI6e63lpYtIbp4+2F5SxJ/hGTUx+nWbOAHPvhBfhN6sEu9G1C5KPR0cm+xxOpZ9
lA7y4Dea7pyVybR/b7lFquE3TReXCoLx79UNNSv8erIlsy1jh9yXDnTCk8SN1dpO
YwJ9U0AHXA==
-----END CERTIFICATE-----
openvpn/dh.pem
ADDED
@@ -0,0 +1,8 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEAlPRBW0tYm271xYHi15JrD3JRlpvdjAm+CZoEq0ElLXvSlIKaNQls
ITH+KIBBX3pgbFFk03fO9ApF0kSOzycRRCuW970iCkDoFUN9y58EG+BI863FkU1h
3dx+c59HqdWXkzFK+SmTfKIe12alZFik5G0Xs0hkphCgPaXvWlojorjQoRfKySw3
VxpybKS83+l3t2ER3Z03IRvWinlnuxVAcymzeSR9hwIMJi3RmYmNmdXNel/WFAo2
zT5j2f2OZHtnBhvo1V92Rml+5rJksPX4lJMRNwVEnXwqVUyCQOTTiGTUjLOO2gdk
HLhH5teetBdKL4tFcldeIJSk3e0oWXbURwIBAg==
-----END DH PARAMETERS-----
openvpn/server.conf
ADDED
@@ -0,0 +1,21 @@
port 1194
proto udp
dev tun
ca /etc/openvpn/server/ca.crt
cert /etc/openvpn/server/server.crt
key /etc/openvpn/server/server.key
dh /etc/openvpn/server/dh.pem
server 10.8.0.0 255.255.255.0
ifconfig-pool-persist ipp.txt
push "redirect-gateway def1 bypass-dhcp"
push "dhcp-option DNS 8.8.8.8"
push "dhcp-option DNS 8.8.4.4"
keepalive 10 120
cipher AES-256-CBC
persist-key
persist-tun
status openvpn-status.log
verb 3
explicit-exit-notify 1
openvpn/server.crt
ADDED
@@ -0,0 +1,86 @@
Certificate:
    Data:
        Version: 3 (0x2)
        Serial Number:
            dd:b5:29:c9:70:b2:b3:65:70:ac:0f:57:30:15:b4:2a
        Signature Algorithm: sha256WithRSAEncryption
        Issuer: CN=yes
        Validity
            Not Before: Aug 2 01:29:38 2025 GMT
            Not After : Nov 5 01:29:38 2027 GMT
        Subject: CN=server
        Subject Public Key Info:
            Public Key Algorithm: rsaEncryption
                Public-Key: (2048 bit)
                Modulus:
                    00:dd:9e:02:fb:e3:57:cd:51:43:36:6a:2f:30:f5:
                    a1:42:5c:16:f1:7b:4b:0a:aa:b1:34:b5:86:51:3e:
                    6b:82:2e:59:df:42:21:cf:65:14:ea:8c:93:3c:0a:
                    72:a5:2e:0f:64:1a:ec:76:52:18:b2:d3:a0:df:df:
                    19:83:7e:39:9e:f5:16:18:36:34:ae:57:cf:2c:89:
                    7c:c5:97:e3:8f:d0:83:08:7f:14:0c:74:2c:d2:95:
                    09:6e:42:99:a0:28:69:83:68:f4:9c:0e:b5:3e:08:
                    8f:d8:06:ec:d5:aa:c8:bc:19:4b:ff:e4:99:50:12:
                    67:25:d4:79:94:1f:3d:64:b2:c8:00:ea:97:c2:df:
                    b8:1c:dc:69:47:9f:59:df:03:06:5a:32:7a:fa:51:
                    96:45:9a:b7:e7:03:ef:9d:3b:94:51:9d:08:69:bb:
                    b0:3e:c8:9c:a3:a0:9c:18:aa:e9:88:ec:96:c3:71:
                    b1:f6:a7:09:ff:c0:56:b1:24:22:ab:fc:9a:c5:fc:
                    fd:67:8e:1a:86:ff:0a:5b:28:46:b4:20:93:05:b6:
                    ff:87:93:66:7d:ae:92:c4:0d:20:99:e9:c5:b8:3d:
                    41:3a:06:83:49:e5:13:2e:d6:33:94:45:6a:36:84:
                    f9:c9:61:fe:98:3a:6e:41:ed:d8:8c:f1:55:3d:6d:
                    53:fb
                Exponent: 65537 (0x10001)
        X509v3 extensions:
            X509v3 Basic Constraints:
                CA:FALSE
            X509v3 Subject Key Identifier:
                F4:62:12:72:49:40:C2:8A:46:5A:CB:71:BE:33:58:25:B3:E0:01:AC
            X509v3 Authority Key Identifier:
                keyid:08:8B:41:2B:DF:47:15:43:38:BE:53:F2:CF:57:82:DF:DF:2B:C6:23
                DirName:/CN=yes
                serial:34:EE:FA:E4:FE:2D:FF:20:FF:3E:72:05:4C:C5:6C:D1:0D:F6:4C:96
            X509v3 Extended Key Usage:
                TLS Web Server Authentication
            X509v3 Key Usage:
                Digital Signature, Key Encipherment
            X509v3 Subject Alternative Name:
                DNS:server
    Signature Algorithm: sha256WithRSAEncryption
    Signature Value:
        85:f7:59:01:c2:99:23:c3:9a:99:2a:0a:bc:5d:7d:1c:e8:7c:
        e9:23:a5:87:08:bd:45:1b:a7:a9:b7:3a:06:b6:91:86:ac:61:
        03:ae:cd:65:80:0e:e4:81:dc:38:b3:fe:6d:6f:02:e4:9e:43:
        95:d0:a6:38:30:53:52:14:f1:96:2a:30:69:2f:56:24:65:ba:
        53:c0:b0:22:23:2b:18:37:a1:0c:45:07:cb:ec:a9:71:f7:96:
        2a:d2:18:94:f0:07:18:1f:4c:d2:c5:d5:66:8f:1d:5c:08:8d:
        02:00:d6:0d:df:fd:6e:1e:2a:47:8c:30:fd:5b:46:56:0a:5a:
        d4:6d:d4:99:c8:94:26:36:0b:86:30:dd:cb:3a:2e:a2:f3:80:
        0f:62:80:f8:9d:ec:98:f2:96:20:4f:46:01:ae:9d:35:7f:34:
        21:d7:71:89:b6:7a:ce:94:7e:14:e6:bf:b6:08:44:39:24:db:
        aa:cf:54:46:34:8f:67:6c:72:22:f1:eb:e9:94:7d:73:26:f3:
        2f:72:fe:28:b3:cb:28:c3:4c:14:3d:c3:81:1e:8d:96:96:e5:
        df:af:c4:0a:06:71:16:df:8f:a3:30:50:79:45:95:4c:e8:57:
        ee:ed:38:dd:82:8e:0e:b1:2b:4d:27:2b:6f:bc:c8:1c:91:de:
        2c:55:69:38
-----BEGIN CERTIFICATE-----
MIIDWDCCAkCgAwIBAgIRAN21KclwsrNlcKwPVzAVtCowDQYJKoZIhvcNAQELBQAw
DjEMMAoGA1UEAwwDeWVzMB4XDTI1MDgwMjAxMjkzOFoXDTI3MTEwNTAxMjkzOFow
ETEPMA0GA1UEAwwGc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEA3Z4C++NXzVFDNmovMPWhQlwW8XtLCqqxNLWGUT5rgi5Z30Ihz2UU6oyTPApy
pS4PZBrsdlIYstOg398Zg345nvUWGDY0rlfPLIl8xZfjj9CDCH8UDHQs0pUJbkKZ
oChpg2j0nA61PgiP2Abs1arIvBlL/+SZUBJnJdR5lB89ZLLIAOqXwt+4HNxpR59Z
3wMGWjJ6+lGWRZq35wPvnTuUUZ0IabuwPsico6CcGKrpiOyWw3Gx9qcJ/8BWsSQi
q/yaxfz9Z44ahv8KWyhGtCCTBbb/h5Nmfa6SxA0gmenFuD1BOgaDSeUTLtYzlEVq
NoT5yWH+mDpuQe3YjPFVPW1T+wIDAQABo4GtMIGqMAkGA1UdEwQCMAAwHQYDVR0O
BBYEFPRiEnJJQMKKRlrLcb4zWCWz4AGsMEkGA1UdIwRCMECAFAiLQSvfRxVDOL5T
8s9Xgt/fK8YjoRKkEDAOMQwwCgYDVQQDDAN5ZXOCFDTu+uT+Lf8g/z5yBUzFbNEN
9kyWMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIFoDARBgNVHREECjAI
ggZzZXJ2ZXIwDQYJKoZIhvcNAQELBQADggEBAIX3WQHCmSPDmpkqCrxdfRzofOkj
pYcIvUUbp6m3Oga2kYasYQOuzWWADuSB3Diz/m1vAuSeQ5XQpjgwU1IU8ZYqMGkv
ViRlulPAsCIjKxg3oQxFB8vsqXH3lirSGJTwBxgfTNLF1WaPHVwIjQIA1g3f/W4e
KkeMMP1bRlYKWtRt1JnIlCY2C4Yw3cs6LqLzgA9igPid7JjyliBPRgGunTV/NCHX
cYm2es6UfhTmv7YIRDkk26rPVEY0j2dsciLx6+mUfXMm8y9y/iizyyjDTBQ9w4Ee
jZaW5d+vxAoGcRbfj6MwUHlFlUzoV+7tON2Cjg6xK00nK2+8yByR3ixVaTg=
-----END CERTIFICATE-----
openvpn/server.key
ADDED
@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDdngL741fNUUM2
ai8w9aFCXBbxe0sKqrE0tYZRPmuCLlnfQiHPZRTqjJM8CnKlLg9kGux2Uhiy06Df
3xmDfjme9RYYNjSuV88siXzFl+OP0IMIfxQMdCzSlQluQpmgKGmDaPScDrU+CI/Y
BuzVqsi8GUv/5JlQEmcl1HmUHz1kssgA6pfC37gc3GlHn1nfAwZaMnr6UZZFmrfn
A++dO5RRnQhpu7A+yJyjoJwYqumI7JbDcbH2pwn/wFaxJCKr/JrF/P1njhqG/wpb
KEa0IJMFtv+Hk2Z9rpLEDSCZ6cW4PUE6BoNJ5RMu1jOURWo2hPnJYf6YOm5B7diM
8VU9bVP7AgMBAAECggEATtwR0sEYtspSYPQS+9iD/AGZ9m75in+n1Ao+E/3isq28
tDmrn0moUjgYklZjakzEFEqSVx4qhMPSrKcORKCvb1Vl+dKcF2fOpFn+KK++Pagk
YGsb3ryeUIbRFsejM/79YNIBrOB89OiGCwiX0QZXLLvRs+qL9Za+1pLPenpNVd2w
zL+AZ8QkJZdHn1vOZt9vKRlpe8psAt64RHb+LqhYWfeLlpIUjpM5Vu9FFewMGPrw
n+GVCzK4ylq0pJ9bYwKI5Hw4qnJ3j5bGIumEjYBqqmef1+OTD3r/wyhTGpK9RRAu
WD9YGJeQx3ybzRL7Wj6k5g0dn+UA82Lh7Y8n9IoSaQKBgQDqP/BU2KapOHgFt2DE
WHU/+zA7/kfMJMGB5dYy8oXTxUY7WuqX9lja3rC0XuH10JTD6Q21jkTujc0T5/1B
4KxuX+nQP/T9b4XzVM3pKWVmHUt6wf24sbuTNxOy/Q/wC7eCnkr04CEl0vf3E56N
JaLG11dbpcn+9RC9FlUhlYY8QwKBgQDyMcz43915YGOQMkGVZFPvKyOy7ol4fFZv
VRfRoGx9CfHCIOfh9vmlUy6TR4qAQkCnkL730OsxpW3aDTe3qcAcmhiK7u5TfWrE
cd1WgrkymJ8hyEk6FSV0GMKrccQeEo2T95cKnk6lNXnEdNp5kx7LBQhL36fEtMXS
FGCcRkNp6QKBgAbm6WLmm0qDIm4wsAY5AQNomEw8OstWDemQ5xXLNYw+1Mns7Nqb
ZJTWWOiHnyrKAYggNsoxrfBFd1Rt0nV9dDcwVkhPih1pis3XotWK5bTzigTM8Hff
rMIyrj7o2+5bugV8OoMqk2903t+F0XchM8GeGLHXmbMMb3jSzqFVsYXXAoGBAII1
Z/99S7LPsXd6rWvFzqJMzRqLx/iw0D92viGDYBAxYnp9+myvvTO27tlbowilleEA
nsrY1TmRuOd8J7JkXtaBuiQnpJXaXaZTmS3DhhG/n/4nkcbaS5KJJU/LECcizl74
w4l/5sRHZbnLIRIvmGSJxhYUnjvQ/HGfZvldhSzRAoGBAMVTrxWedC2XeSMwjdhF
zeDBAp/dTMEnRaS0j3rp+4a4l7Sus1L/p8gBrJtnf/B43bNvQ5cr2jwH7Ql5cF1A
A7hpZ3C0trNaf6WqslJQhN8j8Cs85S/8rPGM5yAfyzKTMe0ytLUjn+XiQCqCUFcT
Inqx4ll7r2tlcI3aMlvN2qsd
-----END PRIVATE KEY-----
requirements.txt
ADDED
@@ -0,0 +1,33 @@
# Core Flask dependencies
Flask==3.1.1
flask-cors==6.0.0
Flask-SQLAlchemy==3.1.1
Werkzeug==3.1.3

# Database
SQLAlchemy==2.0.41

# Async and networking
aiohttp==3.12.15
aiohappyeyeballs==2.6.1
aiosignal==1.4.0
websockets==15.0.1

# Utilities
attrs==25.3.0
blinker==1.9.0
click==8.2.1
frozenlist==1.7.0
greenlet==3.2.3
idna==3.10
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
multidict==6.6.3
propcache==0.3.2
typing_extensions==4.14.0
yarl==1.20.1

# Additional dependencies for VPN management
psutil==5.9.8
routes/__pycache__/isp_api.cpython-311.pyc
ADDED
Binary file (47.9 kB).
routes/__pycache__/user.cpython-311.pyc
ADDED
Binary file (3.4 kB).
routes/isp_api.py
ADDED
@@ -0,0 +1,1171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
ISP API Routes
|
3 |
+
|
4 |
+
Flask routes for the Virtual ISP Stack API endpoints
|
5 |
+
"""
|
6 |
+
|
7 |
+
from flask import Blueprint, jsonify, request, Response
|
8 |
+
from flask_cors import cross_origin
|
9 |
+
import json
|
10 |
+
import time
|
11 |
+
from typing import Dict, Any
|
12 |
+
|
13 |
+
# Import core modules
|
14 |
+
from core.dhcp_server import DHCPServer
|
15 |
+
from core.nat_engine import NATEngine
|
16 |
+
from core.firewall import FirewallEngine, FirewallRule, FirewallRuleBuilder, FirewallAction, FirewallDirection
|
17 |
+
from core.tcp_engine import TCPEngine
|
18 |
+
from core.virtual_router import VirtualRouter
|
19 |
+
from core.socket_translator import SocketTranslator
|
20 |
+
from core.packet_bridge import PacketBridge
|
21 |
+
from core.session_tracker import SessionTracker, SessionType, SessionState
|
22 |
+
from core.logger import VirtualISPLogger, LogLevel, LogCategory, LogFilter
|
23 |
+
from core.openvpn_manager import OpenVPNManager, initialize_openvpn_manager, get_openvpn_manager
|
24 |
+
from core.traffic_router import TrafficRouter
|
25 |
+
|
26 |
+
# Create blueprint
|
27 |
+
isp_api = Blueprint('isp_api', __name__)
|
28 |
+
|
29 |
+
# Global instances (will be initialized by main app)
|
30 |
+
dhcp_server: DHCPServer = None
|
31 |
+
nat_engine: NATEngine = None
|
32 |
+
firewall_engine: FirewallEngine = None
|
33 |
+
tcp_engine: TCPEngine = None
|
34 |
+
virtual_router: VirtualRouter = None
|
35 |
+
socket_translator: SocketTranslator = None
|
36 |
+
packet_bridge: PacketBridge = None
|
37 |
+
session_tracker: SessionTracker = None
|
38 |
+
logger: VirtualISPLogger = None
|
39 |
+
openvpn_manager: OpenVPNManager = None
|
40 |
+
traffic_router: TrafficRouter = None
|
41 |
+
|
42 |
+
|
43 |
+
def init_engines(config: Dict[str, Any]):
|
44 |
+
"""Initialize all ISP stack engines including traffic router"""
|
45 |
+
global dhcp_server, nat_engine, firewall_engine, tcp_engine, virtual_router
|
46 |
+
global socket_translator, packet_bridge, session_tracker, logger, openvpn_manager, traffic_router
|
47 |
+
|
48 |
+
try:
|
49 |
+
# Initialize logger first
|
50 |
+
logger = VirtualISPLogger({})
|
51 |
+
logger.start()
|
52 |
+
|
53 |
+
# Initialize core engines
|
54 |
+
dhcp_server = DHCPServer(config.get('dhcp', {}))
|
55 |
+
nat_engine = NATEngine(config.get('nat', {}))
|
56 |
+
firewall_engine = FirewallEngine(config.get('firewall', {}))
|
57 |
+
tcp_engine = TCPEngine(config.get('tcp', {}))
|
58 |
+
virtual_router = VirtualRouter(config.get('router', {}))
|
59 |
+
socket_translator = SocketTranslator(config.get('socket_translator', {}))
|
60 |
+
session_tracker = SessionTracker(config.get('session_tracker', {}))
|
61 |
+
|
62 |
+
# Initialize traffic router for VPN routing
|
63 |
+
traffic_router = TrafficRouter(config.get('traffic_router', {}))
|
64 |
+
|
65 |
+
# Initialize OpenVPN manager with traffic router
|
66 |
+
openvpn_manager = OpenVPNManager(config.get('openvpn', {}))
|
67 |
+
|
68 |
+
# Set component references for integration
|
69 |
+
openvpn_manager.set_isp_components(
|
70 |
+
dhcp_server=dhcp_server,
|
71 |
+
nat_engine=nat_engine,
|
72 |
+
firewall=firewall_engine,
|
73 |
+
router=virtual_router,
|
74 |
+
traffic_router=traffic_router
|
75 |
+
)
|
76 |
+
|
77 |
+
# Initialize packet bridge (requires other components)
|
78 |
+
packet_bridge = PacketBridge(config.get('packet_bridge', {}))
|
79 |
+
|
80 |
+
# Start all engines
|
81 |
+
dhcp_server.start()
|
82 |
+
nat_engine.start()
|
83 |
+
tcp_engine.start()
|
84 |
+
socket_translator.start()
|
85 |
+
session_tracker.start()
|
86 |
+
packet_bridge.start()
|
87 |
+
|
88 |
+
# Start traffic router
|
89 |
+
traffic_router.start()
|
90 |
+
|
91 |
+
logger.log(LogLevel.INFO, LogCategory.SYSTEM, "api", "All engines initialized and started")
|
92 |
+
|
93 |
+
return True
|
94 |
+
|
95 |
+
except Exception as e:
|
96 |
+
print(f"Error initializing engines: {e}")
|
97 |
+
return False
|
98 |
+
|
99 |
+
|
100 |
+
# Default configuration
|
101 |
+
DEFAULT_CONFIG = {
|
102 |
+
'dhcp': {
|
103 |
+
'pool_start': '10.0.0.10',
|
104 |
+
'pool_end': '10.0.0.100',
|
105 |
+
'lease_time': 3600,
|
106 |
+
'gateway': '10.0.0.1',
|
107 |
+
'dns_servers': ['8.8.8.8', '8.8.4.4']
|
108 |
+
},
|
109 |
+
'nat': {
|
110 |
+
'port_range_start': 10000,
|
111 |
+
'port_range_end': 65535,
|
112 |
+
'session_timeout': 300
|
113 |
+
},
|
114 |
+
'firewall': {
|
115 |
+
'default_policy': 'ACCEPT',
|
116 |
+
'log_blocked': True
|
117 |
+
},
|
118 |
+
'traffic_router': {
|
119 |
+
'vpn_network': '10.8.0.0/24',
|
120 |
+
'vpn_gateway': '10.8.0.1',
|
121 |
+
'enable_free_data_routing': True
|
122 |
+
},
|
123 |
+
'tcp': {
|
124 |
+
'initial_window': 65535,
|
125 |
+
'max_retries': 3,
|
126 |
+
'timeout': 30
|
127 |
+
},
|
128 |
+
'openvpn': {
|
129 |
+
'config_path': '/etc/openvpn/server/server.conf',
|
130 |
+
'status_log': '/var/log/openvpn/openvpn-status.log',
|
131 |
+
'port': 1194,
|
132 |
+
'protocol': 'udp'
|
133 |
+
}
|
134 |
+
}
|
135 |
+
|
136 |
+
|
137 |
+
@isp_api.route('/api/status', methods=['GET'])
|
138 |
+
@cross_origin()
|
139 |
+
def get_status():
|
140 |
+
"""Get system status including traffic router"""
|
141 |
+
try:
|
142 |
+
# Collect status from all components
|
143 |
+
status = {
|
144 |
+
'components': {},
|
145 |
+
'stats': {},
|
146 |
+
'timestamp': time.time(),
|
147 |
+
'uptime': time.time() - (dhcp_server.start_time if dhcp_server and dhcp_server.start_time else time.time())
|
148 |
+
}
|
149 |
+
|
150 |
+
# Component status
|
151 |
+
status['components']['dhcp_server'] = dhcp_server.is_running if dhcp_server else False
|
152 |
+
status['components']['nat_engine'] = nat_engine.is_running if nat_engine else False
|
153 |
+
status['components']['firewall_engine'] = firewall_engine.is_running if firewall_engine else False
|
154 |
+
status['components']['tcp_engine'] = tcp_engine.is_running if tcp_engine else False
|
155 |
+
status['components']['virtual_router'] = virtual_router.is_running if virtual_router else False
|
156 |
+
status['components']['socket_translator'] = socket_translator.is_running if socket_translator else False
|
157 |
+
status['components']['packet_bridge'] = packet_bridge.is_running if packet_bridge else False
|
158 |
+
status['components']['session_tracker'] = session_tracker.is_running if session_tracker else False
|
159 |
+
status['components']['logger'] = logger.is_running if logger else False
|
160 |
+
status['components']['traffic_router'] = traffic_router.is_running if traffic_router else False
|
161 |
+
|
162 |
+
# Statistics
|
163 |
+
status['stats']['dhcp_leases'] = len(dhcp_server.get_leases()) if dhcp_server else 0
|
164 |
+
status['stats']['nat_sessions'] = len(nat_engine.get_sessions()) if nat_engine else 0
|
165 |
+
status['stats']['firewall_rules'] = len(firewall_engine.get_rules()) if firewall_engine else 0
|
166 |
+
status['stats']['tcp_connections'] = len(tcp_engine.get_connections()) if tcp_engine else 0
|
167 |
+
status['stats']['total_sessions'] = len(session_tracker.get_sessions()) if session_tracker else 0
|
168 |
+
status['stats']['bridge_clients'] = len(packet_bridge.get_clients()) if packet_bridge else 0
|
169 |
+
|
170 |
+
# Traffic router stats
|
171 |
+
if traffic_router:
|
172 |
+
traffic_stats = traffic_router.get_stats()
|
173 |
+
status['stats']['traffic_router'] = traffic_stats
|
174 |
+
|
175 |
+
return jsonify({
|
176 |
+
'status': 'success',
|
177 |
+
'system_status': status
|
178 |
+
})
|
179 |
+
|
180 |
+
except Exception as e:
|
181 |
+
return jsonify({
|
182 |
+
'status': 'error',
|
183 |
+
'message': str(e)
|
184 |
+
}), 500
|
185 |
+
|
186 |
+
|
187 |
+
@isp_api.route('/api/config', methods=['POST'])
|
188 |
+
@cross_origin()
|
189 |
+
def update_config():
|
190 |
+
"""Update system configuration"""
|
191 |
+
try:
|
192 |
+
config_data = request.get_json()
|
193 |
+
|
194 |
+
# Here you would update the actual configuration
|
195 |
+
# For now, just return success
|
196 |
+
|
197 |
+
if logger:
|
198 |
+
logger.log(LogLevel.INFO, LogCategory.SYSTEM, 'api', 'Configuration updated', metadata=config_data)
|
199 |
+
|
200 |
+
return jsonify({
|
201 |
+
'status': 'success',
|
202 |
+
'message': 'Configuration updated successfully'
|
203 |
+
})
|
204 |
+
|
205 |
+
except Exception as e:
|
206 |
+
if logger:
|
207 |
+
logger.log(LogLevel.ERROR, LogCategory.SYSTEM, 'api', f'Configuration update failed: {str(e)}')
|
208 |
+
|
209 |
+
return jsonify({
|
210 |
+
'status': 'error',
|
211 |
+
'message': str(e)
|
212 |
+
}), 500
|
213 |
+
|
214 |
+
|
215 |
+
# DHCP endpoints
|
216 |
+
@isp_api.route('/api/dhcp/leases', methods=['GET'])
|
217 |
+
@cross_origin()
|
218 |
+
def get_dhcp_leases():
|
219 |
+
"""Get DHCP lease table"""
|
220 |
+
try:
|
221 |
+
if not dhcp_server:
|
222 |
+
return jsonify({'status': 'error', 'message': 'DHCP server not initialized'}), 500
|
223 |
+
|
224 |
+
leases = dhcp_server.get_leases()
|
225 |
+
|
226 |
+
return jsonify({
|
227 |
+
'status': 'success',
|
228 |
+
'leases': leases,
|
229 |
+
'count': len(leases)
|
230 |
+
})
|
231 |
+
|
232 |
+
except Exception as e:
|
233 |
+
return jsonify({
|
234 |
+
'status': 'error',
|
235 |
+
'message': str(e)
|
236 |
+
}), 500
|
237 |
+
|
238 |
+
|
239 |
+
@isp_api.route('/dhcp/leases/<mac_address>', methods=['DELETE'])
|
240 |
+
@cross_origin()
|
241 |
+
def release_dhcp_lease(mac_address):
|
242 |
+
"""Release DHCP lease"""
|
243 |
+
try:
|
244 |
+
if not dhcp_server:
|
245 |
+
return jsonify({'status': 'error', 'message': 'DHCP server not initialized'}), 500
|
246 |
+
|
247 |
+
success = dhcp_server.release_lease(mac_address)
|
248 |
+
|
249 |
+
if success:
|
250 |
+
if logger:
|
251 |
+
logger.info(LogCategory.DHCP, 'api', f'Released DHCP lease for {mac_address}')
|
252 |
+
|
253 |
+
return jsonify({
|
254 |
+
'status': 'success',
|
255 |
+
'message': f'Lease for {mac_address} released'
|
256 |
+
})
|
257 |
+
else:
|
258 |
+
return jsonify({
|
259 |
+
'status': 'error',
|
260 |
+
'message': f'Lease for {mac_address} not found'
|
261 |
+
}), 404
|
262 |
+
|
263 |
+
except Exception as e:
|
264 |
+
return jsonify({
|
265 |
+
'status': 'error',
|
266 |
+
'message': str(e)
|
267 |
+
}), 500
|
268 |
+
|
269 |
+
|
270 |
+
# NAT endpoints
|
271 |
+
@isp_api.route('/nat/sessions', methods=['GET'])
|
272 |
+
@cross_origin()
|
273 |
+
def get_nat_sessions():
|
274 |
+
"""Get NAT session table"""
|
275 |
+
try:
|
276 |
+
if not nat_engine:
|
277 |
+
return jsonify({'status': 'error', 'message': 'NAT engine not initialized'}), 500
|
278 |
+
|
279 |
+
sessions = nat_engine.get_sessions()
|
280 |
+
|
281 |
+
return jsonify({
|
282 |
+
'status': 'success',
|
283 |
+
'sessions': sessions,
|
284 |
+
'count': len(sessions)
|
285 |
+
})
|
286 |
+
|
287 |
+
except Exception as e:
|
288 |
+
return jsonify({
|
289 |
+
'status': 'error',
|
290 |
+
'message': str(e)
|
291 |
+
}), 500
|
292 |
+
|
293 |
+
|
294 |
+
@isp_api.route('/nat/stats', methods=['GET'])
|
295 |
+
@cross_origin()
|
296 |
+
def get_nat_stats():
|
297 |
+
"""Get NAT statistics"""
|
298 |
+
try:
|
299 |
+
if not nat_engine:
|
300 |
+
return jsonify({'status': 'error', 'message': 'NAT engine not initialized'}), 500
|
301 |
+
|
302 |
+
stats = nat_engine.get_stats()
|
303 |
+
|
304 |
+
return jsonify({
|
305 |
+
'status': 'success',
|
306 |
+
'stats': stats
|
307 |
+
})
|
308 |
+
|
309 |
+
except Exception as e:
|
310 |
+
return jsonify({
|
311 |
+
'status': 'error',
|
312 |
+
'message': str(e)
|
313 |
+
}), 500
|
314 |
+
|
315 |
+
|
316 |
+
# Firewall endpoints
|
317 |
+
@isp_api.route('/firewall/rules', methods=['GET'])
|
318 |
+
@cross_origin()
|
319 |
+
def get_firewall_rules():
|
320 |
+
"""Get firewall rules"""
|
321 |
+
try:
|
322 |
+
if not firewall_engine:
|
323 |
+
return jsonify({'status': 'error', 'message': 'Firewall engine not initialized'}), 500
|
324 |
+
|
325 |
+
rules = firewall_engine.get_rules()
|
326 |
+
|
327 |
+
return jsonify({
|
328 |
+
'status': 'success',
|
329 |
+
'rules': rules,
|
330 |
+
'count': len(rules)
|
331 |
+
})
|
332 |
+
|
333 |
+
except Exception as e:
|
334 |
+
return jsonify({
|
335 |
+
'status': 'error',
|
336 |
+
'message': str(e)
|
337 |
+
}), 500
|
338 |
+
|
339 |
+
|
340 |
+
@isp_api.route('/firewall/rules', methods=['POST'])
|
341 |
+
@cross_origin()
|
342 |
+
def add_firewall_rule():
|
343 |
+
"""Add firewall rule"""
|
344 |
+
try:
|
345 |
+
if not firewall_engine:
|
346 |
+
return jsonify({'status': 'error', 'message': 'Firewall engine not initialized'}), 500
|
347 |
+
|
348 |
+
rule_data = request.get_json()
|
349 |
+
|
350 |
+
# Build rule using builder
|
351 |
+
builder = FirewallRuleBuilder(rule_data['rule_id'])
|
352 |
+
builder.set_priority(rule_data.get('priority', 100))
|
353 |
+
builder.set_action(rule_data['action'])
|
354 |
+
        builder.set_direction(rule_data.get('direction', 'BOTH'))

        if 'source_ip' in rule_data:
            builder.set_source_ip(rule_data['source_ip'])
        if 'dest_ip' in rule_data:
            builder.set_dest_ip(rule_data['dest_ip'])
        if 'source_port' in rule_data:
            builder.set_source_port(rule_data['source_port'])
        if 'dest_port' in rule_data:
            builder.set_dest_port(rule_data['dest_port'])
        if 'protocol' in rule_data:
            builder.set_protocol(rule_data['protocol'])
        if 'description' in rule_data:
            builder.set_description(rule_data['description'])

        rule = builder.build()
        success = firewall_engine.add_rule(rule)

        if success:
            if logger:
                logger.info(LogCategory.FIREWALL, 'api', f'Added firewall rule: {rule.rule_id}')

            return jsonify({
                'status': 'success',
                'message': f'Rule {rule.rule_id} added successfully'
            })
        else:
            return jsonify({
                'status': 'error',
                'message': f'Rule {rule.rule_id} already exists'
            }), 400

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/firewall/rules/<rule_id>', methods=['DELETE'])
@cross_origin()
def delete_firewall_rule(rule_id):
    """Delete firewall rule"""
    try:
        if not firewall_engine:
            return jsonify({'status': 'error', 'message': 'Firewall engine not initialized'}), 500

        success = firewall_engine.remove_rule(rule_id)

        if success:
            if logger:
                logger.info(LogCategory.FIREWALL, 'api', f'Deleted firewall rule: {rule_id}')

            return jsonify({
                'status': 'success',
                'message': f'Rule {rule_id} deleted successfully'
            })
        else:
            return jsonify({
                'status': 'error',
                'message': f'Rule {rule_id} not found'
            }), 404

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/firewall/logs', methods=['GET'])
@cross_origin()
def get_firewall_logs():
    """Get firewall logs"""
    try:
        if not firewall_engine:
            return jsonify({'status': 'error', 'message': 'Firewall engine not initialized'}), 500

        limit = request.args.get('limit', 100, type=int)
        filter_action = request.args.get('action')

        logs = firewall_engine.get_logs(limit=limit, filter_action=filter_action)

        return jsonify({
            'status': 'success',
            'logs': logs,
            'count': len(logs)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/firewall/stats', methods=['GET'])
@cross_origin()
def get_firewall_stats():
    """Get firewall statistics"""
    try:
        if not firewall_engine:
            return jsonify({'status': 'error', 'message': 'Firewall engine not initialized'}), 500

        stats = firewall_engine.get_stats()

        return jsonify({
            'status': 'success',
            'stats': stats
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500
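Usage sketch (not part of the uploaded file): one way the firewall endpoints above could be exercised from a Python client. The http://127.0.0.1:5000/api base URL, the POST /firewall/rules path for the add handler, the example rule fields, and the rule-1 identifier are all assumptions; the real prefix depends on how app.py registers the isp_api blueprint, and valid action/direction values depend on core/firewall.py.

# Hypothetical client sketch for the firewall endpoints; BASE and field values are assumptions.
import requests

BASE = 'http://127.0.0.1:5000/api'  # assumed blueprint mount point

# Add a rule (keys mirror those read by the add handler above; values are illustrative)
resp = requests.post(f'{BASE}/firewall/rules', json={
    'direction': 'BOTH',
    'source_ip': '10.8.0.0/24',
    'dest_port': 22,
    'protocol': 'TCP',
    'description': 'Example rule for illustration',
})
print(resp.status_code, resp.json())

# Recent firewall log entries, optionally filtered by action
print(requests.get(f'{BASE}/firewall/logs', params={'limit': 20, 'action': 'DENY'}).json())

# Aggregate rule/traffic statistics
print(requests.get(f'{BASE}/firewall/stats').json())

# Remove a rule by its identifier ('rule-1' is a placeholder)
print(requests.delete(f'{BASE}/firewall/rules/rule-1').json())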
# TCP connections endpoints
@isp_api.route('/tcp/connections', methods=['GET'])
@cross_origin()
def get_tcp_connections():
    """Get TCP connections"""
    try:
        if not tcp_engine:
            return jsonify({'status': 'error', 'message': 'TCP engine not initialized'}), 500

        connections = tcp_engine.get_connections()

        return jsonify({
            'status': 'success',
            'connections': connections,
            'count': len(connections)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


# Router endpoints
@isp_api.route('/router/routes', methods=['GET'])
@cross_origin()
def get_routing_table():
    """Get routing table"""
    try:
        if not virtual_router:
            return jsonify({'status': 'error', 'message': 'Virtual router not initialized'}), 500

        routes = virtual_router.get_routing_table()

        return jsonify({
            'status': 'success',
            'routes': routes,
            'count': len(routes)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/router/interfaces', methods=['GET'])
@cross_origin()
def get_router_interfaces():
    """Get router interfaces"""
    try:
        if not virtual_router:
            return jsonify({'status': 'error', 'message': 'Virtual router not initialized'}), 500

        interfaces = virtual_router.get_interfaces()

        return jsonify({
            'status': 'success',
            'interfaces': interfaces,
            'count': len(interfaces)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/router/arp', methods=['GET'])
@cross_origin()
def get_arp_table():
    """Get ARP table"""
    try:
        if not virtual_router:
            return jsonify({'status': 'error', 'message': 'Virtual router not initialized'}), 500

        arp_table = virtual_router.get_arp_table()

        return jsonify({
            'status': 'success',
            'arp_table': arp_table,
            'count': len(arp_table)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/router/stats', methods=['GET'])
@cross_origin()
def get_router_stats():
    """Get router statistics"""
    try:
        if not virtual_router:
            return jsonify({'status': 'error', 'message': 'Virtual router not initialized'}), 500

        stats = virtual_router.get_stats()

        return jsonify({
            'status': 'success',
            'stats': stats
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


# Bridge endpoints
@isp_api.route('/bridge/clients', methods=['GET'])
@cross_origin()
def get_bridge_clients():
    """Get bridge clients"""
    try:
        if not packet_bridge:
            return jsonify({'status': 'error', 'message': 'Packet bridge not initialized'}), 500

        clients = packet_bridge.get_clients()

        return jsonify({
            'status': 'success',
            'clients': clients,
            'count': len(clients)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/bridge/stats', methods=['GET'])
@cross_origin()
def get_bridge_stats():
    """Get bridge statistics"""
    try:
        if not packet_bridge:
            return jsonify({'status': 'error', 'message': 'Packet bridge not initialized'}), 500

        stats = packet_bridge.get_stats()

        return jsonify({
            'status': 'success',
            'stats': stats
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


# Session tracking endpoints
@isp_api.route('/sessions', methods=['GET'])
@cross_origin()
def get_sessions():
    """Get sessions"""
    try:
        if not session_tracker:
            return jsonify({'status': 'error', 'message': 'Session tracker not initialized'}), 500

        limit = request.args.get('limit', 100, type=int)
        offset = request.args.get('offset', 0, type=int)

        sessions = session_tracker.get_sessions(limit=limit, offset=offset)

        return jsonify({
            'status': 'success',
            'sessions': sessions,
            'count': len(sessions)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/sessions/summary', methods=['GET'])
@cross_origin()
def get_session_summary():
    """Get session summary"""
    try:
        if not session_tracker:
            return jsonify({'status': 'error', 'message': 'Session tracker not initialized'}), 500

        summary = session_tracker.get_session_summary()

        return jsonify({
            'status': 'success',
            'summary': summary
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500
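Usage sketch (not part of the uploaded file): paging through the session tracker with the limit/offset parameters read by get_sessions above, then fetching the one-shot summary. The base URL is an assumption.

# Hypothetical client sketch: page through tracked sessions 50 at a time.
import requests

BASE = 'http://127.0.0.1:5000/api'  # assumed blueprint mount point

offset = 0
while True:
    page = requests.get(f'{BASE}/sessions', params={'limit': 50, 'offset': offset}).json()
    sessions = page.get('sessions', [])
    if not sessions:
        break
    for s in sessions:
        print(s)
    offset += len(sessions)

# Aggregate view instead of paging
print(requests.get(f'{BASE}/sessions/summary').json())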
# Logging endpoints
@isp_api.route('/logs', methods=['GET'])
@cross_origin()
def get_logs():
    """Get system logs"""
    try:
        if not logger:
            return jsonify({'status': 'error', 'message': 'Logger not initialized'}), 500

        limit = request.args.get('limit', 100, type=int)
        offset = request.args.get('offset', 0, type=int)
        level = request.args.get('level')
        category = request.args.get('category')
        search = request.args.get('search')

        if search:
            logs = logger.search_logs(search, limit=limit)
        else:
            log_filter = LogFilter()
            if level:
                log_filter.level_filter = LogLevel(level.upper())
            if category:
                log_filter.category_filter = LogCategory(category.upper())

            logs = logger.get_logs(limit=limit, offset=offset, log_filter=log_filter)

        return jsonify({
            'status': 'success',
            'logs': logs,
            'count': len(logs)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/logs/errors', methods=['GET'])
@cross_origin()
def get_error_logs():
    """Get recent error logs"""
    try:
        if not logger:
            return jsonify({'status': 'error', 'message': 'Logger not initialized'}), 500

        limit = request.args.get('limit', 50, type=int)
        errors = logger.get_recent_errors(limit=limit)

        return jsonify({
            'status': 'success',
            'errors': errors,
            'count': len(errors)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500
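Usage sketch (not part of the uploaded file): querying the log endpoints above. A free-text search parameter takes precedence over the level/category filters, which are upper-cased and passed to the LogLevel/LogCategory enums, so the valid filter values depend on core/logger.py. The base URL is an assumption.

# Hypothetical client sketch for the /logs and /logs/errors endpoints.
import requests

BASE = 'http://127.0.0.1:5000/api'  # assumed blueprint mount point

# Filtered query; 'warning'/'firewall' are placeholders for whatever the enums accept
print(requests.get(f'{BASE}/logs', params={'level': 'warning', 'category': 'firewall', 'limit': 25}).json())

# Free-text search (overrides the filters server-side)
print(requests.get(f'{BASE}/logs', params={'search': 'dhcp lease', 'limit': 25}).json())

# Recent errors only
print(requests.get(f'{BASE}/logs/errors', params={'limit': 10}).json())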
# System status endpoints
@isp_api.route('/status', methods=['GET'])
@cross_origin()
def get_system_status():
    """Get overall system status"""
    try:
        status = {
            'timestamp': time.time(),
            'uptime': time.time() - (time.time() - 3600),  # Placeholder
            'components': {
                'dhcp_server': dhcp_server is not None and dhcp_server.running,
                'nat_engine': nat_engine is not None and nat_engine.running,
                'firewall_engine': firewall_engine is not None,
                'tcp_engine': tcp_engine is not None and tcp_engine.running,
                'virtual_router': virtual_router is not None,
                'socket_translator': socket_translator is not None and socket_translator.running,
                'packet_bridge': packet_bridge is not None and packet_bridge.running,
                'session_tracker': session_tracker is not None and session_tracker.running,
                'logger': logger is not None and logger.running
            },
            'stats': {}
        }

        # Collect stats from each component
        if dhcp_server:
            status['stats']['dhcp_leases'] = len(dhcp_server.get_leases())

        if nat_engine:
            nat_stats = nat_engine.get_stats()
            status['stats']['nat_sessions'] = nat_stats.get('active_sessions', 0)

        if firewall_engine:
            fw_stats = firewall_engine.get_stats()
            status['stats']['firewall_rules'] = fw_stats.get('total_rules', 0)

        if tcp_engine:
            tcp_connections = tcp_engine.get_connections()
            status['stats']['tcp_connections'] = len(tcp_connections)

        if packet_bridge:
            bridge_stats = packet_bridge.get_stats()
            status['stats']['bridge_clients'] = bridge_stats.get('active_clients', 0)

        if session_tracker:
            session_stats = session_tracker.get_stats()
            status['stats']['total_sessions'] = session_stats.get('active_sessions', 0)

        return jsonify({
            'status': 'success',
            'system_status': status
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/stats', methods=['GET'])
@cross_origin()
def get_system_stats():
    """Get comprehensive system statistics"""
    try:
        stats = {
            'timestamp': time.time(),
            'dhcp': dhcp_server.get_leases() if dhcp_server else {},
            'nat': nat_engine.get_stats() if nat_engine else {},
            'firewall': firewall_engine.get_stats() if firewall_engine else {},
            'router': virtual_router.get_stats() if virtual_router else {},
            'bridge': packet_bridge.get_stats() if packet_bridge else {},
            'sessions': session_tracker.get_stats() if session_tracker else {},
            'logger': logger.get_stats() if logger else {}
        }

        return jsonify({
            'status': 'success',
            'stats': stats
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500
# Module-level start of the packet bridge; guarded so that importing the blueprint
# does not raise AttributeError if packet_bridge has not been initialized yet.
if packet_bridge:
    packet_bridge.start()
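Usage sketch (not part of the uploaded file): a minimal poller for the /status endpoint above, flagging components whose running flags are false. The base URL and the 30-second interval are assumptions.

# Hypothetical monitoring sketch built on the /status response shape above.
import time
import requests

BASE = 'http://127.0.0.1:5000/api'  # assumed blueprint mount point

while True:
    system = requests.get(f'{BASE}/status').json()['system_status']
    down = [name for name, up in system['components'].items() if not up]
    if down:
        print('components not running:', ', '.join(down))
    print('stats snapshot:', system['stats'])
    time.sleep(30)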
# OpenVPN endpoints
@isp_api.route('/openvpn/status', methods=['GET'])
@cross_origin()
def get_openvpn_status():
    """Get OpenVPN server status"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        status = openvpn_manager.get_server_status()

        return jsonify({
            'status': 'success',
            'openvpn_status': {
                'is_running': status.is_running,
                'connected_clients': status.connected_clients,
                'total_bytes_received': status.total_bytes_received,
                'total_bytes_sent': status.total_bytes_sent,
                'uptime': status.uptime,
                'server_ip': status.server_ip,
                'server_port': status.server_port
            }
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/openvpn/start', methods=['POST'])
@cross_origin()
def start_openvpn_server():
    """Start OpenVPN server"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        success = openvpn_manager.start_server()

        if success:
            return jsonify({
                'status': 'success',
                'message': 'OpenVPN server started successfully'
            })
        else:
            return jsonify({
                'status': 'error',
                'message': 'Failed to start OpenVPN server'
            }), 500

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/openvpn/stop', methods=['POST'])
@cross_origin()
def stop_openvpn_server():
    """Stop OpenVPN server"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        success = openvpn_manager.stop_server()

        if success:
            return jsonify({
                'status': 'success',
                'message': 'OpenVPN server stopped successfully'
            })
        else:
            return jsonify({
                'status': 'error',
                'message': 'Failed to stop OpenVPN server'
            }), 500

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/openvpn/clients', methods=['GET'])
@cross_origin()
def get_openvpn_clients():
    """Get connected OpenVPN clients"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        clients = openvpn_manager.get_connected_clients()

        return jsonify({
            'status': 'success',
            'clients': clients
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/openvpn/clients/<client_id>/disconnect', methods=['POST'])
@cross_origin()
def disconnect_openvpn_client(client_id):
    """Disconnect a specific OpenVPN client"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        success = openvpn_manager.disconnect_client(client_id)

        if success:
            return jsonify({
                'status': 'success',
                'message': f'Client {client_id} disconnected successfully'
            })
        else:
            return jsonify({
                'status': 'error',
                'message': f'Failed to disconnect client {client_id}'
            }), 500

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/openvpn/config/<client_name>', methods=['GET'])
@cross_origin()
def get_client_config(client_name):
    """Generate client configuration file"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        # Get server IP from request or use default
        server_ip = request.args.get('server_ip', '127.0.0.1')

        config = openvpn_manager.generate_client_config(client_name, server_ip)

        if config:
            return Response(
                config,
                mimetype='text/plain',
                headers={'Content-Disposition': f'attachment; filename={client_name}.ovpn'}
            )
        else:
            return jsonify({
                'status': 'error',
                'message': 'Failed to generate client configuration'
            }), 500

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/openvpn/stats', methods=['GET'])
@cross_origin()
def get_openvpn_stats():
    """Get comprehensive OpenVPN statistics"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        stats = openvpn_manager.get_statistics()

        return jsonify({
            'status': 'success',
            'openvpn_stats': stats
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/openvpn/configs', methods=['GET'])
@cross_origin()
def list_client_configs():
    """List all stored client configurations"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        configs = openvpn_manager.list_client_configs()

        return jsonify({
            'status': 'success',
            'configs': configs,
            'count': len(configs)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/openvpn/configs/<client_name>', methods=['GET'])
@cross_origin()
def get_stored_client_config(client_name):
    """Get stored client configuration"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        config_content = openvpn_manager.load_client_config(client_name)

        if config_content:
            return Response(
                config_content,
                mimetype='text/plain',
                headers={'Content-Disposition': f'attachment; filename={client_name}.ovpn'}
            )
        else:
            return jsonify({
                'status': 'error',
                'message': f'Configuration for {client_name} not found'
            }), 404

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/openvpn/configs/<client_name>', methods=['DELETE'])
@cross_origin()
def delete_stored_client_config(client_name):
    """Delete stored client configuration"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        success = openvpn_manager.delete_client_config(client_name)

        if success:
            return jsonify({
                'status': 'success',
                'message': f'Configuration for {client_name} deleted successfully'
            })
        else:
            return jsonify({
                'status': 'error',
                'message': f'Configuration for {client_name} not found'
            }), 404

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500


@isp_api.route('/openvpn/configs/<client_name>/generate', methods=['POST'])
@cross_origin()
def generate_and_save_client_config(client_name):
    """Generate and save client configuration"""
    try:
        if not openvpn_manager:
            return jsonify({
                'status': 'error',
                'message': 'OpenVPN manager not initialized'
            }), 500

        # Get server IP from request or use default
        server_ip = request.args.get('server_ip', '127.0.0.1')

        config_content = openvpn_manager.generate_and_save_client_config(client_name, server_ip)

        if config_content:
            return jsonify({
                'status': 'success',
                'message': f'Configuration for {client_name} generated and saved successfully'
            })
        else:
            return jsonify({
                'status': 'error',
                'message': f'Failed to generate configuration for {client_name}'
            }), 500

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500
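Usage sketch (not part of the uploaded file): a possible provisioning flow for the OpenVPN endpoints above: generate and store a client profile, download the saved .ovpn, then check server status and connected clients. The base URL, the client name, and the public server_ip value are assumptions.

# Hypothetical provisioning sketch built on the OpenVPN endpoints above.
import requests

BASE = 'http://127.0.0.1:5000/api'  # assumed blueprint mount point
client = 'laptop-01'                # placeholder client name

# Generate and persist the profile server-side (server_ip defaults to 127.0.0.1)
print(requests.post(f'{BASE}/openvpn/configs/{client}/generate', params={'server_ip': '203.0.113.10'}).json())

# Download the stored profile as an attachment
ovpn = requests.get(f'{BASE}/openvpn/configs/{client}')
with open(f'{client}.ovpn', 'wb') as fh:
    fh.write(ovpn.content)

# Server status and currently connected clients
print(requests.get(f'{BASE}/openvpn/status').json())
print(requests.get(f'{BASE}/openvpn/clients').json())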
routes/user.py
ADDED
@@ -0,0 +1,39 @@
from flask import Blueprint, request, jsonify
from models.user import User, db

user_bp = Blueprint('user', __name__)

@user_bp.route('/users', methods=['GET'])
def get_users():
    users = User.query.all()
    return jsonify([user.to_dict() for user in users])

@user_bp.route('/users', methods=['POST'])
def create_user():
    data = request.json
    user = User(username=data['username'], email=data['email'])
    db.session.add(user)
    db.session.commit()
    return jsonify(user.to_dict()), 201

@user_bp.route('/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
    user = User.query.get_or_404(user_id)
    return jsonify(user.to_dict())

@user_bp.route('/users/<int:user_id>', methods=['PUT'])
def update_user(user_id):
    user = User.query.get_or_404(user_id)
    data = request.json
    user.username = data.get('username', user.username)
    user.email = data.get('email', user.email)
    db.session.commit()
    return jsonify(user.to_dict())

@user_bp.route('/users/<int:user_id>', methods=['DELETE'])
def delete_user(user_id):
    user = User.query.get_or_404(user_id)
    db.session.delete(user)
    db.session.commit()
    return '', 204
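Usage sketch (not part of the uploaded file): exercising the user CRUD routes above. The base URL is an assumption, and the uid lookup assumes User.to_dict() in models/user.py exposes the primary key as 'id'.

# Hypothetical client sketch for the user blueprint; BASE and the 'id' key are assumptions.
import requests

BASE = 'http://127.0.0.1:5000/api'  # assumed blueprint mount point

created = requests.post(f'{BASE}/users', json={'username': 'alice', 'email': 'alice@example.com'}).json()
uid = created['id']  # assumes to_dict() includes the primary key as 'id'

print(requests.get(f'{BASE}/users/{uid}').json())
requests.put(f'{BASE}/users/{uid}', json={'email': 'alice@isp.example'})
requests.delete(f'{BASE}/users/{uid}')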