import torch
import torch.nn as nn

from .memory import CognitiveMemory


class CognitiveNode(nn.Module):
    """Differentiable cognitive node with dynamic plasticity."""

    def __init__(self, node_id: int, input_size: int):
        super().__init__()
        self.id = node_id
        self.input_size = input_size
        self.activation = 0.0  # most recent activation value, updated in forward()

        # Scalar weight and bias for this node's squashing function.
        self.weights = nn.Parameter(torch.randn(1))
        self.bias = nn.Parameter(torch.zeros(1))

        # Per-node memory used to blend retrieved context into new inputs.
        self.memory = CognitiveMemory(context_size=1)

        # Learnable neurotransmitter levels that modulate activation strength:
        # dopamine amplifies the response, serotonin dampens it.
        self.dopamine = nn.Parameter(torch.tensor(0.5))
        self.serotonin = nn.Parameter(torch.tensor(0.5))

        # Sliding window of recent activations, keyed by a monotonic step
        # counter so keys stay unique after old entries are evicted.
        self.recent_activations = {}
        self._step = 0

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        # This node processes a single scalar signal.
        inputs = inputs.reshape(1)

        # Blend the current input with retrieved memory context.
        mem_context = self.memory.retrieve(inputs)
        combined = inputs * 0.7 + mem_context * 0.3

        # Squash, then modulate by the neurotransmitter balance.
        base_activation = torch.tanh(combined * self.weights + self.bias)
        modulated = base_activation * (1 + self.dopamine - self.serotonin)

        # Record the activation in memory for future retrievals.
        self.memory.add_memory(inputs, modulated.item())
        self.activation = modulated.item()

        # Keep a window of the last 100 activations. Keying by the step
        # counter (rather than len(self.recent_activations)) avoids key
        # collisions once the oldest entries start being evicted.
        self.recent_activations[self._step] = modulated.item()
        self._step += 1
        if len(self.recent_activations) > 100:
            self.recent_activations.pop(min(self.recent_activations.keys()))

        return modulated
    def update_plasticity(self, reward: float):
        """Update neurotransmitter levels based on a reward signal."""
        # Update the parameters outside the autograd graph; the sigmoid
        # keeps both levels bounded in (0, 1).
        with torch.no_grad():
            self.dopamine.copy_(torch.sigmoid(self.dopamine + reward * 0.1))
            self.serotonin.copy_(torch.sigmoid(self.serotonin - reward * 0.05))
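

# Minimal usage sketch. It assumes CognitiveMemory (from .memory) exposes
# retrieve(tensor) -> tensor and add_memory(tensor, float) as used above,
# and that this module runs inside its package so the relative import
# resolves; neither is defined in this file.
if __name__ == "__main__":
    node = CognitiveNode(node_id=0, input_size=1)
    for step in range(5):
        out = node(torch.tensor([0.8]))
        # A positive reward nudges dopamine up and serotonin down.
        node.update_plasticity(reward=1.0)
        print(f"step {step}: activation={out.item():.4f}")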