# cognitive_net/node.py
import torch
import torch.nn as nn
from .memory import CognitiveMemory
class CognitiveNode(nn.Module):
"""Differentiable cognitive node with dynamic plasticity"""
def __init__(self, node_id: int, input_size: int):
super().__init__()
self.id = node_id
self.input_size = input_size
self.activation = 0.0
# Dynamic input weights with Hebbian plasticity
        self.weights = nn.Parameter(torch.randn(1))  # scalar weight: the node receives one aggregated input
self.bias = nn.Parameter(torch.zeros(1))
# Memory system - adjusted context size
        self.memory = CognitiveMemory(context_size=1)  # context size matches the scalar input
# Neurotransmitter levels
self.dopamine = nn.Parameter(torch.tensor(0.5))
self.serotonin = nn.Parameter(torch.tensor(0.5))
# Store recent activations
self.recent_activations = {}
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
# Ensure inputs is a single value tensor
inputs = inputs.reshape(1)
# Memory influence
mem_context = self.memory.retrieve(inputs)
# Combine inputs with memory context
combined = inputs * 0.7 + mem_context * 0.3
# Adaptive activation with neurotransmitter modulation
base_activation = torch.tanh(combined * self.weights + self.bias)
modulated = base_activation * (1 + self.dopamine - self.serotonin)
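        # (dopamine above serotonin amplifies the response; the reverse dampens it)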
# Update memory
self.memory.add_memory(inputs, modulated.item())
        # Store recent activation under a monotonically increasing key so that
        # evicting the oldest entry never collides with a newly inserted one
        key = max(self.recent_activations, default=-1) + 1
        self.recent_activations[key] = modulated.item()
        if len(self.recent_activations) > 100:
            self.recent_activations.pop(min(self.recent_activations))
return modulated
def update_plasticity(self, reward: float):
"""Update neurotransmitter levels based on reward signal"""
self.dopamine.data = torch.sigmoid(self.dopamine + reward * 0.1)
self.serotonin.data = torch.sigmoid(self.serotonin - reward * 0.05)
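

# --- Usage sketch (assumption: CognitiveMemory exposes retrieve(inputs) and
# add_memory(inputs, value) as called above; run via the package, e.g.
# `python -m cognitive_net.node`, since the module uses a relative import) ---
if __name__ == "__main__":
    node = CognitiveNode(node_id=0, input_size=1)
    x = torch.tensor([0.25])
    out = node(x)                       # single forward pass on a scalar input
    node.update_plasticity(reward=1.0)  # reinforce with a positive reward
    print(f"activation={out.item():.4f} "
          f"dopamine={node.dopamine.item():.4f} "
          f"serotonin={node.serotonin.item():.4f}")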