# cognitive_net/node.py
import torch
import torch.nn as nn
from collections import deque
from .memory import CognitiveMemory


class CognitiveNode(nn.Module):
    """Autonomous neural unit with neuromodulatory dynamics"""

    def __init__(self, node_id: int, input_size: int):
        super().__init__()
        self.id = node_id
        self.input_size = input_size

        # Adaptive processing components
        self.weights = nn.Parameter(torch.randn(input_size) * 0.1)
        self.bias = nn.Parameter(torch.zeros(1))
        self.memory = CognitiveMemory(context_size=input_size)

        # Neuromodulatory state
        self.dopamine = nn.Parameter(torch.tensor(0.5))
        self.serotonin = nn.Parameter(torch.tensor(0.5))

        # Activation history (rolling window)
        self.recent_activations = deque(maxlen=100)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        # Memory integration
        mem_context = self.memory.retrieve(inputs)
        combined = inputs * 0.7 + mem_context * 0.3

        # Neurotransmitter-modulated activation
        base_activation = torch.tanh(combined @ self.weights + self.bias)
        modulated = base_activation * (
            1 + torch.sigmoid(self.dopamine) - torch.sigmoid(self.serotonin)
        )

        # Memory consolidation
        self.memory.add_memory(inputs, modulated.item())
        self.recent_activations.append(modulated.item())
        return modulated

    def update_plasticity(self, reward: float):
        """Adaptive neuromodulation based on performance"""
        with torch.no_grad():
            self.dopamine += reward * 0.1
            self.serotonin -= reward * 0.05
            # Maintain neurotransmitter bounds
            self.dopamine.clamp_(0, 1)
            self.serotonin.clamp_(0, 1)
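

# Minimal usage sketch, not part of the module's API: it assumes the
# .memory.CognitiveMemory class provides the retrieve/add_memory interface
# used above and that a single node emits a scalar activation. The shapes,
# reward value, and printed fields below are illustrative only.
if __name__ == "__main__":
    node = CognitiveNode(node_id=0, input_size=8)
    x = torch.randn(8)

    # Forward pass: memory-blended input -> neuromodulated scalar activation
    out = node(x)
    print(f"node {node.id} activation: {out.item():.4f}")

    # Reward-driven plasticity: dopamine rises, serotonin falls, both clamped to [0, 1]
    node.update_plasticity(reward=1.0)
    print(f"dopamine={node.dopamine.item():.2f}, serotonin={node.serotonin.item():.2f}")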