Update node.py
node.py CHANGED
@@ -1,54 +1,49 @@
+# cognitive_net/node.py
 import torch
 import torch.nn as nn
+from collections import deque
 from .memory import CognitiveMemory
 
 class CognitiveNode(nn.Module):
-    """
+    """Autonomous neural unit with neuromodulatory dynamics"""
     def __init__(self, node_id: int, input_size: int):
         super().__init__()
         self.id = node_id
         self.input_size = input_size
-        self.activation = 0.0
 
-        #
-        self.weights = nn.Parameter(torch.randn(
+        # Adaptive processing components
+        self.weights = nn.Parameter(torch.randn(input_size) * 0.1)
         self.bias = nn.Parameter(torch.zeros(1))
+        self.memory = CognitiveMemory(context_size=input_size)
 
-        #
-        self.memory = CognitiveMemory(context_size=1)  # Changed from input_size to 1
-
-        # Neurotransmitter levels
+        # Neuromodulatory state
         self.dopamine = nn.Parameter(torch.tensor(0.5))
         self.serotonin = nn.Parameter(torch.tensor(0.5))
 
-        #
-        self.recent_activations = {}
-
+        # Activation history (rolling window)
+        self.recent_activations = deque(maxlen=100)
+
     def forward(self, inputs: torch.Tensor) -> torch.Tensor:
-        #
-        inputs = inputs.reshape(1)
-
-        # Memory influence
+        # Memory integration
         mem_context = self.memory.retrieve(inputs)
-
-        # Combine inputs with memory context
         combined = inputs * 0.7 + mem_context * 0.3
 
-        #
-        base_activation = torch.tanh(combined
-        modulated = base_activation * (1 + self.dopamine
+        # Neurotransmitter-modulated activation
+        base_activation = torch.tanh(combined @ self.weights + self.bias)
+        modulated = base_activation * (1 + torch.sigmoid(self.dopamine)
+                                       - torch.sigmoid(self.serotonin))
 
-        #
+        # Memory consolidation
         self.memory.add_memory(inputs, modulated.item())
-
-        # Store recent activation
-        self.recent_activations[len(self.recent_activations)] = modulated.item()
-        if len(self.recent_activations) > 100:
-            self.recent_activations.pop(min(self.recent_activations.keys()))
+        self.recent_activations.append(modulated.item())
 
         return modulated
-
+
     def update_plasticity(self, reward: float):
-        """
-
-
+        """Adaptive neuromodulation based on performance"""
+        with torch.no_grad():
+            self.dopamine += reward * 0.1
+            self.serotonin -= reward * 0.05
+            # Maintain neurotransmitter bounds
+            self.dopamine.clamp_(0, 1)
+            self.serotonin.clamp_(0, 1)
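
For a quick sanity check of the updated node, the snippet below runs one forward pass and one plasticity update. It is a minimal sketch, not part of the commit: it assumes the package is importable as cognitive_net and that CognitiveMemory.retrieve returns a context tensor with the same shape as the inputs (only the retrieve/add_memory calls visible in the diff are relied on).

import torch
from cognitive_net.node import CognitiveNode

node = CognitiveNode(node_id=0, input_size=4)

# Forward pass: tanh pre-activation scaled by the dopamine/serotonin term.
out = node(torch.randn(4))
print(out.item())

# Positive reward nudges dopamine up and serotonin down, clamped to [0, 1].
node.update_plasticity(reward=1.0)
print(node.dopamine.item(), node.serotonin.item())  # 0.6, 0.45 from the 0.5 defaults

Note that deque(maxlen=100) replaces the manual dict-based window from the old version: append drops the oldest entry automatically once 100 activations are stored.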
|