Create network.py
network.py +134 -0
network.py
ADDED
@@ -0,0 +1,134 @@
import math
from typing import Optional

import torch
import torch.nn as nn
from torch import optim

from .node import CognitiveNode


class DynamicCognitiveNet(nn.Module):
    """Self-organizing cognitive network with structure learning."""

    def __init__(self, input_size: int, output_size: int):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size

        # Core nodes: one single-input node per input feature.
        self.nodes = nn.ModuleDict({
            f'input_{i}': CognitiveNode(i, 1) for i in range(input_size)
        })
        self.output_nodes = nn.ModuleList([
            CognitiveNode(input_size + i, input_size)
            for i in range(output_size)
        ])

        # Structure learning: one scalar strength per connection.
        self.connection_strength = nn.ParameterDict()
        self.recent_activations = {}  # unused here; nodes track their own
        self.init_connections()

        # Emotional context: a scalar mood signal nudged by recent loss.
        self.emotional_state = nn.Parameter(torch.tensor(0.0))
        self.learning_rate = 0.01

        # Adaptive learning.
        self.optimizer = optim.AdamW(self.parameters(), lr=0.001)
        self.loss_fn = nn.MSELoss()

    def init_connections(self):
        """Initialize weak random input->output connections."""
        for i in range(self.input_size):
            for out_node in self.output_nodes:
                conn_id = f'input_{i}->{out_node.id}'
                self.connection_strength[conn_id] = nn.Parameter(
                    torch.randn(1) * 0.1
                )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Process inputs; x is a 1-D tensor of length input_size.
        activations = {}
        for i in range(self.input_size):
            node = self.nodes[f'input_{i}']
            activations[i] = node(x[i].unsqueeze(0))

        # Propagate to each output node through sigmoid-gated connections.
        outputs = []
        for out_node in self.output_nodes:
            input_acts = []
            for i in range(self.input_size):
                conn_id = f'input_{i}->{out_node.id}'
                weight = self.connection_strength.get(conn_id, torch.tensor(0.0))
                input_acts.append(activations[i] * torch.sigmoid(weight))

            if input_acts:
                # Scale by sqrt(n) to keep the combined magnitude stable.
                combined = sum(input_acts) / math.sqrt(len(input_acts))
                outputs.append(out_node(combined))

        return torch.cat(outputs)

    def structural_update(self, reward: float):
        """Adapt network structure based on performance."""
        # Strengthen connections on positive reward, decay them otherwise.
        # Updates are applied in place so the optimizer keeps tracking the
        # same parameter tensors instead of stale copies.
        with torch.no_grad():
            for weight in self.connection_strength.values():
                if reward > 0:
                    weight.add_(self.learning_rate * reward)
                else:
                    weight.mul_(0.9)
                weight.clamp_(-1.0, 1.0)

        # Occasionally propose a new connection when performance is poor.
        if reward < -0.5 and torch.rand(1).item() < 0.3:
            new_conn = self._create_new_connection()
            if new_conn and new_conn not in self.connection_strength:
                param = nn.Parameter(torch.randn(1) * 0.1)
                self.connection_strength[new_conn] = param
                # Register the new parameter so AdamW actually updates it.
                self.optimizer.add_param_group({'params': [param]})

    def _create_new_connection(self) -> Optional[str]:
        """Propose a connection between the two least active nodes."""
        node_activations = {
            node_id: torch.mean(torch.stack(list(node.recent_activations.values())))
            for node_id, node in self.nodes.items()
            if hasattr(node, 'recent_activations') and node.recent_activations
        }

        if not node_activations:
            return None

        # Pick the two least active nodes as the new source/target pair.
        sorted_nodes = sorted(node_activations.items(), key=lambda x: x[1])
        if len(sorted_nodes) < 2:
            return None

        source = sorted_nodes[0][0]
        target = sorted_nodes[1][0]
        # Note: both endpoints come from self.nodes (input nodes); forward()
        # only reads input->output connections, so these links stay dormant
        # until propagation is extended to use them.
        return f"{source}->{target}"

    def train_step(self, x: torch.Tensor, y: torch.Tensor) -> float:
        """Execute a single training step."""
        self.optimizer.zero_grad()
        pred = self(x)
        loss = self.loss_fn(pred, y)

        # L1 regularization keeps connection strengths sparse.
        reg_loss = sum(w.abs().mean() for w in self.connection_strength.values())
        total_loss = loss + 0.01 * reg_loss

        total_loss.backward()
        self.optimizer.step()

        # Update the emotional context outside the autograd graph.
        with torch.no_grad():
            self.emotional_state.copy_(torch.sigmoid(
                self.emotional_state + (0.5 - loss.item()) * 0.1
            ))

        # Structural update, rewarded when loss falls below 0.5.
        self.structural_update(reward=0.5 - loss.item())

        return total_loss.item()
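For reference, a minimal training loop might look like the sketch below. It is an illustration, not part of this commit: the package path in the import is hypothetical, the CognitiveNode class from .node is not shown here, and the sketch assumes each forward pass takes a 1-D input of length input_size and returns a tensor of length output_size. The toy target mapping is made up purely to exercise train_step.

import torch
from cognitive_net.network import DynamicCognitiveNet  # hypothetical package path

net = DynamicCognitiveNet(input_size=4, output_size=2)

for step in range(100):
    x = torch.rand(4)                       # one 1-D sample, length input_size
    y = torch.tensor([x.sum(), x.mean()])   # toy target, illustration only
    loss = net.train_step(x, y)
    if step % 20 == 0:
        print(f'step {step}: loss={loss:.4f} '
              f'emotion={net.emotional_state.item():.3f}')

Because train_step drives both gradient descent and the reward-based structural update from the same loss value, a loop like this exercises the full adaptation path: weights, connection strengths, and the emotional state all change on every call.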