Mirror of https://github.com/docxology/cognitive.git, synced 2025-10-31 13:16:05 +02:00
324 lines | 13 KiB | Python

| """
 | |
| Earth System Simulator implementation.
 | |
| Provides multi-scale simulation of ecological, climate and human systems.
 | |
| """
 | |
| 
 | |
| import logging
 | |
| from typing import Dict, List, Optional, Tuple, Union, Any
 | |
| import numpy as np
 | |
| from dataclasses import dataclass
 | |
| 
 | |
| from earth_systems import (
 | |
|     SystemState, EcologicalState, ClimateState, 
 | |
|     HumanImpactState, SimulationConfig
 | |
| )
 | |
| 
 | |
| logger = logging.getLogger(__name__)
 | |
| 
 | |
| @dataclass
 | |
| class ModelState:
 | |
|     """Class representing the state of an Active Inference model."""
 | |
|     beliefs: np.ndarray
 | |
|     policies: np.ndarray
 | |
|     precision: float
 | |
|     free_energy: float
 | |
|     prediction_error: float
 | |
| 
 | |
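# In Active Inference terms these fields are conventionally read as follows:
# beliefs is a posterior distribution over hidden states, policies a
# distribution over candidate action sequences, precision a confidence
# weighting on prediction errors, and free_energy / prediction_error are
# scalar diagnostics of how well current predictions match observations.
# This reading is an interpretation consistent with how the simulator below
# uses ModelState, not a constraint enforced by the dataclass itself.
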
class EarthSystemSimulator:
    """Earth System Simulator implementation."""

    def __init__(self, config_dict: Dict[str, Any]):
        """Initialize simulator with configuration."""
        try:
            self.config = SimulationConfig.from_dict(config_dict)
            logger.info("Initialized Earth System Simulator with configuration")

            # Set random seed if provided
            if self.config.random_seed is not None:
                np.random.seed(self.config.random_seed)
                logger.info(f"Set random seed to {self.config.random_seed}")

        except Exception as e:
            logger.error(f"Failed to initialize simulator: {str(e)}")
            raise

    def run_scale_simulation(self, scale: str, **kwargs) -> Dict[str, Any]:
        """Run simulation at specified scale."""
        try:
            logger.info(f"Running simulation at {scale} scale")
            # TODO: Implement scale-specific simulation logic
            return {"scale": scale, "status": "simulated"}
        except Exception as e:
            logger.error(f"Error in scale simulation: {str(e)}")
            raise

    def test_intervention(self, intervention_type: str, **kwargs) -> Dict[str, Any]:
        """Test intervention strategy."""
        try:
            logger.info(f"Testing {intervention_type} intervention")
            # TODO: Implement intervention testing logic
            return {"intervention": intervention_type, "status": "tested"}
        except Exception as e:
            logger.error(f"Error in intervention test: {str(e)}")
            raise

    def analyze_stability(self, scenario: str) -> Dict[str, Any]:
        """Analyze system stability."""
        try:
            logger.info(f"Analyzing stability for scenario: {scenario}")
            # TODO: Implement stability analysis logic
            return {"scenario": scenario, "status": "analyzed"}
        except Exception as e:
            logger.error(f"Error in stability analysis: {str(e)}")
            raise

    def analyze_resilience(self, disturbance_type: str) -> Dict[str, Any]:
        """Analyze system resilience."""
        try:
            logger.info(f"Analyzing resilience for disturbance: {disturbance_type}")
            # TODO: Implement resilience analysis logic
            return {"disturbance": disturbance_type, "status": "analyzed"}
        except Exception as e:
            logger.error(f"Error in resilience analysis: {str(e)}")
            raise

    def _compute_ecological_health(self, state: SystemState) -> float:
        """Compute ecological health metric."""
        # TODO: Implement ecological health computation
        return 0.0

    def _compute_climate_stability(self, state: SystemState) -> float:
        """Compute climate stability metric."""
        # TODO: Implement climate stability computation
        return 0.0

    def _compute_social_wellbeing(self, state: SystemState) -> float:
        """Compute social wellbeing metric."""
        # TODO: Implement social wellbeing computation
        return 0.0

    def test_belief_update(
        self,
        method: str,
        initial_beliefs: np.ndarray,
        observation: np.ndarray,
        **kwargs
    ) -> Dict[str, Any]:
        """Test belief updating mechanisms."""
        try:
            logger.info(f"Testing {method} belief updates")

            if method == "variational":
                updated_beliefs = self._variational_belief_update(
                    initial_beliefs, observation, **kwargs)
            elif method == "sampling":
                updated_beliefs = self._sampling_belief_update(
                    initial_beliefs, observation, **kwargs)
            else:
                raise ValueError(f"Unknown belief update method: {method}")

            return {
                "method": method,
                "initial_beliefs": initial_beliefs.tolist(),
                "observation": observation.tolist(),
                "updated_beliefs": updated_beliefs.tolist()
            }

        except Exception as e:
            logger.error(f"Error in belief update test: {str(e)}")
            raise

    def test_policy_inference(
        self,
        initial_state: ModelState,
        goal_prior: np.ndarray,
        goal_type: str,
        **kwargs
    ) -> Dict[str, Any]:
        """Test policy inference mechanisms."""
        try:
            logger.info(f"Testing policy inference for {goal_type} goal")

            # Compute expected free energy
            expected_free_energy = self._compute_expected_free_energy(
                initial_state, goal_prior)

            # Infer policies via a softmax over negative expected free energy
            inferred_policies = self._softmax(-expected_free_energy)

            return {
                "goal_type": goal_type,
                "initial_state": self._state_to_dict(initial_state),
                "goal_prior": goal_prior.tolist(),
                "inferred_policies": inferred_policies.tolist(),
                "expected_free_energy": expected_free_energy.tolist()
            }

        except Exception as e:
            logger.error(f"Error in policy inference test: {str(e)}")
            raise

    def test_free_energy(
        self,
        component: str,
        temporal_horizon: int,
        precision: float
    ) -> Dict[str, Any]:
        """Test free energy computation."""
        try:
            logger.info(f"Testing {component} free energy computation")

            # Generate example trajectory
            time = np.linspace(0, temporal_horizon, 100)
            state = 0.5 + 0.2 * np.sin(time/10)
            prediction = 0.5 + 0.2 * np.sin((time-1)/10)

            if component == "accuracy":
                energy = -0.5 * precision * (state - prediction)**2
            elif component == "complexity":
                energy = -np.log(np.abs(np.gradient(state)) + 1e-8)
            elif component == "expected":
                energy = self._compute_expected_free_energy_trajectory(
                    state, prediction, precision)
            elif component == "full":
                accuracy = -0.5 * precision * (state - prediction)**2
                complexity = -np.log(np.abs(np.gradient(state)) + 1e-8)
                energy = accuracy + complexity
            else:
                raise ValueError(f"Unknown free energy component: {component}")

            return {
                "component": component,
                "time": time.tolist(),
                "state": state.tolist(),
                "prediction": prediction.tolist(),
                "energy": energy.tolist()
            }

        except Exception as e:
            logger.error(f"Error in free energy test: {str(e)}")
            raise

    def test_hierarchical_inference(
        self,
        level: str,
        coupling_strength: float,
        top_down_weight: float,
        bottom_up_weight: float
    ) -> Dict[str, Any]:
        """Test hierarchical inference."""
        try:
            logger.info(f"Testing hierarchical inference at {level} level")

            # Generate example multi-scale dynamics
            time = np.linspace(0, 100, 100)

            # Generate beliefs at different scales
            micro = 0.5 + 0.2 * np.sin(time/5) + np.random.normal(0, 0.02, 100)
            meso = 0.5 + 0.2 * np.sin(time/10) + np.random.normal(0, 0.02, 100)
            macro = 0.5 + 0.2 * np.sin(time/20) + np.random.normal(0, 0.02, 100)

            # Select the state at the requested level and its neighbouring levels
            if level == "micro":
                state = micro
                top_down = meso
                bottom_up = None
            elif level == "meso":
                state = meso
                top_down = macro
                bottom_up = micro
            elif level == "macro":
                state = macro
                top_down = None
                bottom_up = meso
            else:
                raise ValueError(f"Unknown hierarchical level: {level}")

            # Compute information flow
            prediction_error = np.zeros_like(time)
            information_flow = np.zeros_like(time)

            for t in range(1, len(time)):
                # Prediction error
                prediction_error[t] = state[t] - state[t-1]

                # Information flow, approximated here by coupling-weighted
                # discrepancies between this level and its neighbours
                if top_down is not None and bottom_up is not None:
                    information_flow[t] = (
                        coupling_strength * (
                            top_down_weight * (state[t] - top_down[t]) +
                            bottom_up_weight * (state[t] - bottom_up[t])
                        )
                    )
                elif top_down is not None:
                    information_flow[t] = (
                        coupling_strength * top_down_weight * (state[t] - top_down[t])
                    )
                elif bottom_up is not None:
                    information_flow[t] = (
                        coupling_strength * bottom_up_weight * (state[t] - bottom_up[t])
                    )

            return {
                "level": level,
                "time": time.tolist(),
                "state": state.tolist(),
                "prediction_error": prediction_error.tolist(),
                "information_flow": information_flow.tolist(),
                "coupling_params": {
                    "strength": coupling_strength,
                    "top_down_weight": top_down_weight,
                    "bottom_up_weight": bottom_up_weight
                }
            }

        except Exception as e:
            logger.error(f"Error in hierarchical inference test: {str(e)}")
            raise

    def _variational_belief_update(
        self,
        beliefs: np.ndarray,
        observation: np.ndarray,
        learning_rate: float = 0.1
    ) -> np.ndarray:
        """Implement variational belief updates."""
        prediction = beliefs  # Simple identity mapping for demonstration
        prediction_error = observation - prediction
        updated_beliefs = beliefs + learning_rate * prediction_error
        return updated_beliefs / np.sum(updated_beliefs)

    def _sampling_belief_update(
        self,
        beliefs: np.ndarray,
        observation: np.ndarray,
        num_samples: int = 1000
    ) -> np.ndarray:
        """Implement sampling-based belief updates."""
        # Simple particle filter: sample particles around the prior beliefs,
        # weight them by closeness to the observation, resample, and average
        particles = np.random.dirichlet(beliefs * num_samples, size=num_samples)
        weights = np.exp(-0.5 * np.sum((particles - observation[None, :])**2, axis=1))
        weights /= np.sum(weights)
        resampled = particles[np.random.choice(num_samples, num_samples, p=weights)]
        return np.mean(resampled, axis=0)

    def _compute_expected_free_energy(
        self,
        state: ModelState,
        goal_prior: np.ndarray
    ) -> np.ndarray:
        """Compute expected free energy for policy selection."""
        # Simple implementation combining goal-seeking and uncertainty reduction
        goal_seeking = -np.abs(state.beliefs - goal_prior)
        uncertainty = -np.sum(state.beliefs * np.log(state.beliefs + 1e-8))
        return goal_seeking - 0.5 * uncertainty

    def _compute_expected_free_energy_trajectory(
        self,
        state: np.ndarray,
        prediction: np.ndarray,
        precision: float
    ) -> np.ndarray:
        """Compute expected free energy over a trajectory."""
        accuracy = -0.5 * precision * (state - prediction)**2
        entropy = -state * np.log(state + 1e-8)
        return accuracy - entropy

    def _state_to_dict(self, state: ModelState) -> Dict[str, Any]:
        """Convert ModelState to dictionary."""
        return {
            "beliefs": state.beliefs.tolist(),
            "policies": state.policies.tolist(),
            "precision": state.precision,
            "free_energy": state.free_energy,
            "prediction_error": state.prediction_error
        }

    def _softmax(self, x: np.ndarray) -> np.ndarray:
        """Compute softmax probabilities."""
        exp_x = np.exp(x - np.max(x))
        return exp_x / np.sum(exp_x)
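

if __name__ == "__main__":
    # Minimal usage sketch. The only assumption made here about the external
    # earth_systems package is that SimulationConfig.from_dict accepts a dict
    # containing a "random_seed" key, since that is the only configuration
    # field this module references; the real schema may require more keys.
    logging.basicConfig(level=logging.INFO)

    simulator = EarthSystemSimulator({"random_seed": 0})

    # Belief updating over a 4-state categorical belief
    beliefs = np.full(4, 0.25)
    observation = np.array([0.7, 0.1, 0.1, 0.1])
    print(simulator.test_belief_update("variational", beliefs, observation))
    print(simulator.test_belief_update("sampling", beliefs, observation))

    # Policy inference from an illustrative ModelState toward a goal prior
    state = ModelState(
        beliefs=beliefs,
        policies=np.full(3, 1.0 / 3.0),
        precision=1.0,
        free_energy=0.0,
        prediction_error=0.0,
    )
    goal_prior = np.array([0.1, 0.1, 0.1, 0.7])
    print(simulator.test_policy_inference(state, goal_prior, "target_state"))

    # Free energy components over a synthetic trajectory (summarised output)
    fe = simulator.test_free_energy("full", temporal_horizon=50, precision=2.0)
    print("free energy (first 5 samples):", fe["energy"][:5])

    # Hierarchical inference at the meso level (summarised output)
    hier = simulator.test_hierarchical_inference(
        "meso", coupling_strength=0.5, top_down_weight=0.6, bottom_up_weight=0.4)
    print("information flow (first 5 samples):", hier["information_flow"][:5])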
