cognitive_net / node.py
vincentiusyoshuac's picture
Update node.py
a135dcc verified
raw
history blame
1.86 kB
# cognitive_net/node.py
import torch
import torch.nn as nn
from collections import deque
from .memory import CognitiveMemory
class CognitiveNode(nn.Module):
    """Autonomous neural unit with neuromodulatory dynamics.

    Each node blends its input with recalled memory context, applies a
    tanh response gated by a dopamine/serotonin balance, and consolidates
    the result back into its memory.
    """

    def __init__(self, node_id: int, input_size: int):
        super().__init__()
        self.id = node_id
        self.input_size = input_size

        # Learnable affine response: per-feature weights plus scalar bias.
        self.weights = nn.Parameter(torch.randn(input_size) * 0.1)
        self.bias = nn.Parameter(torch.zeros(1))
        self.memory = CognitiveMemory(context_size=input_size)

        # Neuromodulator levels; kept within [0, 1] by update_plasticity.
        self.dopamine = nn.Parameter(torch.tensor(0.5))
        self.serotonin = nn.Parameter(torch.tensor(0.5))

        # Rolling window of the most recent scalar activations.
        self.recent_activations = deque(maxlen=100)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Compute the node's modulated activation for *inputs*.

        NOTE(review): `.item()` below implies the activation is a scalar
        tensor, i.e. `inputs` is a 1-D vector of length `input_size` —
        confirm against callers and `CognitiveMemory.retrieve`.
        """
        # Blend current input with recalled memory context (70/30 mix).
        recalled = self.memory.retrieve(inputs)
        blended = 0.7 * inputs + 0.3 * recalled

        # Tanh response scaled by the neurotransmitter balance: dopamine
        # amplifies, serotonin dampens.
        raw = torch.tanh(blended @ self.weights + self.bias)
        gain = 1 + torch.sigmoid(self.dopamine) - torch.sigmoid(self.serotonin)
        output = raw * gain

        # Consolidate into memory and record the scalar activation.
        value = output.item()
        self.memory.add_memory(inputs, value)
        self.recent_activations.append(value)
        return output

    def update_plasticity(self, reward: float):
        """Shift neurotransmitter levels in response to *reward*."""
        with torch.no_grad():
            # Dopamine tracks reward; serotonin moves opposite at half rate.
            self.dopamine += 0.1 * reward
            self.serotonin -= 0.05 * reward
            # Keep both levels bounded in [0, 1].
            self.dopamine.clamp_(0, 1)
            self.serotonin.clamp_(0, 1)