зеркало из
				https://github.com/docxology/cognitive.git
				synced 2025-10-31 13:16:05 +02:00 
			
		
		
		
	
		
			
				
	
	
		
			1496 строки
		
	
	
		
			53 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			1496 строки
		
	
	
		
			53 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
| """
 | |
| Test suite for Continuous Active Inference implementation.
 | |
| Tests are organized in increasing complexity:
 | |
| 1. Basic initialization and properties
 | |
| 2. Single-step dynamics
 | |
| 3. Multi-step evolution
 | |
| 4. Complex dynamical behaviors
 | |
| """
 | |
| 
 | |
| import numpy as np
 | |
| import pytest
 | |
| from pathlib import Path
 | |
| import matplotlib.pyplot as plt
 | |
| import seaborn as sns
 | |
| import os
 | |
| import json
 | |
| from typing import Dict, List, Optional, Union
 | |
| import matplotlib.animation as animation
 | |
| 
 | |
| from continuous_generic import (
 | |
|     ContinuousState,
 | |
|     ContinuousActiveInference
 | |
| )
 | |
| from visualization import ContinuousVisualizer
 | |
| 
 | |
| # Configure pytest markers
 | |
def pytest_configure(config):
    """Register this suite's custom markers with pytest.

    One marker is registered per test-complexity tier (basic, single_step,
    multi_step, complex) so that runs with ``--strict-markers`` do not
    warn about unknown markers.

    Args:
        config: The pytest ``Config`` object passed to the hook.
    """
    marker_descriptions = (
        "basic: mark test as basic initialization and property test",
        "single_step: mark test as single step dynamics test",
        "multi_step: mark test as multi-step evolution test",
        "complex: mark test as complex dynamical behavior test",
    )
    for description in marker_descriptions:
        config.addinivalue_line("markers", description)
 | |
| 
 | |
# Create conftest.py with marker configuration.
# NOTE(review): this duplicates the pytest_configure hook defined above;
# both register the same four markers.
conftest_content = '''
def pytest_configure(config):
    """Configure pytest with custom markers."""
    config.addinivalue_line(
        "markers", 
        "basic: mark test as basic initialization and property test"
    )
    config.addinivalue_line(
        "markers", 
        "single_step: mark test as single step dynamics test"
    )
    config.addinivalue_line(
        "markers", 
        "multi_step: mark test as multi-step evolution test"
    )
    config.addinivalue_line(
        "markers", 
        "complex: mark test as complex dynamical behavior test"
    )
'''

# Write conftest.py next to this file if it doesn't exist.
# NOTE(review): this is an import-time filesystem side effect — importing
# this module creates a file on disk.
conftest_path = Path(__file__).parent / 'conftest.py'
if not conftest_path.exists():
    with open(conftest_path, 'w') as f:
        f.write(conftest_content)

# Create output directory tree for test diagnostics, one subdirectory per
# marker/complexity tier. Relative path — resolved against the process CWD,
# not this file's location.
TEST_OUTPUT_DIR = Path("Output/tests")
for subdir in ['basic', 'single_step', 'multi_step', 'complex']:
    (TEST_OUTPUT_DIR / subdir).mkdir(parents=True, exist_ok=True)
 | |
| 
 | |
def save_test_diagnostics(name: str, data: dict, subdir: str = 'basic',
                          base_dir: Optional[Path] = None):
    """Persist test diagnostics: raw JSON data plus diagnostic plots.

    Args:
        name: Test name; becomes the leaf directory under ``subdir``.
        data: Diagnostics dict. Numpy arrays/scalars (and tuples) are
            converted to plain Python types before JSON serialization.
        subdir: Category subdirectory (e.g. 'basic', 'multi_step').
        base_dir: Output root. Defaults to the module-level
            ``TEST_OUTPUT_DIR`` when None, preserving the old behavior.
    """
    root = TEST_OUTPUT_DIR if base_dir is None else base_dir
    save_dir = root / subdir / name
    save_dir.mkdir(parents=True, exist_ok=True)

    def convert_to_serializable(obj):
        """Recursively convert numpy containers/scalars to JSON-safe types."""
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, dict):
            return {k: convert_to_serializable(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            # Tuples are serialized as JSON lists.
            return [convert_to_serializable(item) for item in obj]
        if isinstance(obj, np.generic):
            # Covers every numpy scalar type (all int/float widths, bool_),
            # not just the int32/int64/float32/float64 subset handled before.
            return obj.item()
        return obj

    # Save raw data with proper conversion.
    with open(save_dir / 'data.json', 'w') as f:
        json.dump(convert_to_serializable(data), f, indent=2)

    # Create diagnostic plots keyed on which fields are present in `data`.
    if 'belief_means' in data:
        plot_belief_diagnostics(data, save_dir)
    if 'free_energy' in data:
        plot_energy_diagnostics(data, save_dir)
    if 'state_history' in data:
        plot_trajectory_diagnostics(data, save_dir)
        if 'energy_history' in data:
            plot_energy_conservation(data, save_dir)
    if 'prediction_error' in data:
        plot_prediction_diagnostics(data, save_dir)
    if 'test_cases' in data and 'results' in data:
        plot_sensory_mapping_diagnostics(data, save_dir)
 | |
| 
 | |
def plot_sensory_mapping_diagnostics(data: dict, save_dir: Path):
    """Create diagnostic plots for sensory mapping tests.

    Expects ``data['results']`` to be a list of dicts with 'input',
    'output', 'ratio' and 'scale_factor' entries (as produced by
    test_sensory_mapping). Writes 'sensory_mapping_analysis.png' into
    ``save_dir``.
    """
    plt.figure(figsize=(15, 10))
    
    # Plot 1: Input vs Output magnitudes.
    plt.subplot(2, 2, 1)
    for result in data['results']:
        # 'ratio' is None for all-zero inputs; those cases are skipped.
        if result['ratio'] is not None:
            input_mag = np.linalg.norm(result['input'])
            output_mag = np.linalg.norm(result['output'])
            plt.scatter(input_mag, output_mag)
    # Identity reference line. NOTE(review): plt.xlim() is evaluated twice
    # and the first plot call can change the limits before the second read.
    plt.plot([0, plt.xlim()[1]], [0, plt.xlim()[1]], 'r--', alpha=0.5)
    plt.xlabel('Input Magnitude')
    plt.ylabel('Output Magnitude')
    plt.title('Input vs Output Magnitudes')
    plt.grid(True)
    
    # Plot 2: Scaling factors across test cases.
    plt.subplot(2, 2, 2)
    scale_factors = [r['scale_factor'] for r in data['results'] if r['scale_factor'] is not None]
    if scale_factors:
        plt.boxplot(scale_factors)
        plt.axhline(y=np.mean(scale_factors), color='r', linestyle='--', label='Mean')
        plt.ylabel('Scale Factor')
        plt.title('Distribution of Scale Factors')
        plt.grid(True)
    
    # Plot 3: Component-wise comparison against the identity diagonal.
    plt.subplot(2, 2, 3)
    for result in data['results']:
        if result['ratio'] is not None:
            plt.scatter(result['input'], result['output'])
    min_val = min(plt.xlim()[0], plt.ylim()[0])
    max_val = max(plt.xlim()[1], plt.ylim()[1])
    plt.plot([min_val, max_val], [min_val, max_val], 'r--', alpha=0.5)
    plt.xlabel('Input Components')
    plt.ylabel('Output Components')
    plt.title('Component-wise Input vs Output')
    plt.grid(True)
    
    # Plot 4: Relative errors of the output against a per-case linear fit.
    plt.subplot(2, 2, 4)
    errors = []
    for result in data['results']:
        if result['ratio'] is not None:
            input_arr = np.array(result['input'])
            output_arr = np.array(result['output'])
            # Per-case mean scale; NOTE(review): divides by |input| per
            # component — a zero component in a non-zero input would
            # produce inf here.
            scale = np.mean(np.abs(output_arr) / np.abs(input_arr))
            # +1e-10 guards the denominator against zero.
            rel_error = np.abs(output_arr - scale * input_arr) / (np.abs(scale * input_arr) + 1e-10)
            errors.extend(rel_error)
    if errors:
        plt.hist(errors, bins=20)
        plt.xlabel('Relative Error')
        plt.ylabel('Count')
        plt.title('Distribution of Relative Errors')
        plt.grid(True)
    
    plt.tight_layout()
    plt.savefig(save_dir / 'sensory_mapping_analysis.png')
    plt.close()
 | |
| 
 | |
def plot_belief_diagnostics(data: dict, save_dir: Path):
    """Create diagnostic plots for belief evolution.

    Expects ``data['belief_means']`` with shape (time, n_states, n_orders);
    optional ``data['belief_precisions']`` of the same shape adds ±2σ
    bands (σ = 1/√precision). Writes 'belief_evolution.png' and, when at
    least two generalized orders exist, 'phase_space.png'.
    """
    means = np.array(data['belief_means'])
    
    # Plot 1: one subplot per generalized order, one line per state.
    plt.figure(figsize=(15, 5 * means.shape[2]))
    for order in range(means.shape[2]):
        plt.subplot(means.shape[2], 1, order + 1)
        for state in range(means.shape[1]):
            plt.plot(means[:, state, order], 
                    label=f'State {state+1}')
            if 'belief_precisions' in data:
                # ±2 standard deviations band around the mean.
                precisions = np.array(data['belief_precisions'])
                std = 1.0 / np.sqrt(precisions[:, state, order])
                time = np.arange(len(means))
                plt.fill_between(time, 
                               means[:, state, order] - 2*std,
                               means[:, state, order] + 2*std,
                               alpha=0.2)
        plt.title(f'Order {order} Evolution')
        plt.xlabel('Time Step')
        # Order 0/1/2 are labeled position/velocity/acceleration.
        plt.ylabel(f'{"Position" if order == 0 else "Velocity" if order == 1 else "Acceleration"}')
        plt.legend()
        plt.grid(True)
    
    plt.tight_layout()
    plt.savefig(save_dir / 'belief_evolution.png')
    plt.close()
    
    # Plot 2: phase space (order 0 vs order 1) with start/end markers
    # (green = start, red = end).
    if means.shape[2] >= 2:
        plt.figure(figsize=(10, 10))
        for state in range(means.shape[1]):
            plt.plot(means[:, state, 0], means[:, state, 1], 
                    label=f'State {state+1}')
            plt.plot(means[0, state, 0], means[0, state, 1], 'go')
            plt.plot(means[-1, state, 0], means[-1, state, 1], 'ro')
        plt.title('Phase Space (Position vs Velocity)')
        plt.xlabel('Position')
        plt.ylabel('Velocity')
        plt.legend()
        plt.grid(True)
        plt.savefig(save_dir / 'phase_space.png')
        plt.close()
 | |
| 
 | |
def plot_energy_diagnostics(data: dict, save_dir: Path):
    """Create diagnostic plots for free-energy evolution.

    Expects a 1-D series in ``data['free_energy']``. Writes
    'energy_diagnostics.png' (raw trace, step-to-step changes, running
    statistics) and 'energy_distribution.png' (histogram).
    """
    plt.figure(figsize=(15, 12))
    
    # Plot 1: Raw free energy trace.
    plt.subplot(3, 1, 1)
    energy = np.array(data['free_energy'])
    plt.plot(energy)
    plt.title('Free Energy Evolution')
    plt.xlabel('Time Step')
    plt.ylabel('Free Energy')
    plt.grid(True)
    
    # Plot 2: First differences — negative values indicate descent.
    plt.subplot(3, 1, 2)
    changes = np.diff(energy)
    plt.plot(changes)
    plt.axhline(y=0, color='r', linestyle='--')
    plt.title('Free Energy Changes')
    plt.xlabel('Time Step')
    plt.ylabel('Energy Change')
    plt.grid(True)
    
    # Plot 3: Running mean ± running std over a sliding window.
    # NOTE(review): if `energy` has < 2 samples, `changes` is empty and
    # window becomes 0, so np.ones(window)/window divides by zero.
    plt.subplot(3, 1, 3)
    window = min(20, len(changes))
    running_mean = np.convolve(changes, np.ones(window)/window, mode='valid')
    running_std = np.array([np.std(changes[i:i+window]) 
                          for i in range(len(changes)-window+1)])
    
    plt.plot(running_mean, label='Running Mean')
    plt.fill_between(np.arange(len(running_mean)),
                    running_mean - running_std,
                    running_mean + running_std,
                    alpha=0.2)
    plt.axhline(y=0, color='r', linestyle='--')
    plt.title(f'Running Statistics (window={window})')
    plt.xlabel('Time Step')
    plt.ylabel('Energy Change')
    plt.legend()
    plt.grid(True)
    
    plt.tight_layout()
    plt.savefig(save_dir / 'energy_diagnostics.png')
    plt.close()
    
    # Additional figure: distribution of free-energy values with mean and
    # median reference lines.
    plt.figure(figsize=(8, 6))
    plt.hist(energy, bins=30, density=True)
    plt.axvline(np.mean(energy), color='r', linestyle='--', label='Mean')
    plt.axvline(np.median(energy), color='g', linestyle='--', label='Median')
    plt.title('Free Energy Distribution')
    plt.xlabel('Free Energy')
    plt.ylabel('Density')
    plt.legend()
    plt.grid(True)
    plt.savefig(save_dir / 'energy_distribution.png')
    plt.close()
 | |
| 
 | |
def plot_trajectory_diagnostics(data: dict, save_dir: Path):
    """Create diagnostic plots for state trajectories.

    Expects ``data['state_history']`` with shape (time, n_states, n_orders);
    the indexing below requires n_states >= 2 and n_orders >= 3
    (position, velocity, acceleration) — TODO confirm with callers.
    Writes 'phase_space.png', 'time_evolution.png',
    'state_correlations.png' and (when more than one time step exists)
    'prediction_analysis.png' into ``save_dir``.
    """
    states = np.array(data['state_history'])
    
    # 1. Phase space plots (green marker = start, red marker = end).
    plt.figure(figsize=(15, 5))
    
    # Position space: state 1 position vs state 2 position.
    plt.subplot(1, 3, 1)
    plt.plot(states[:, 0, 0], states[:, 1, 0])
    plt.plot(states[0, 0, 0], states[0, 1, 0], 'go', label='Start')
    plt.plot(states[-1, 0, 0], states[-1, 1, 0], 'ro', label='End')
    plt.title('Position Space')
    plt.xlabel('x₁')
    plt.ylabel('x₂')
    plt.grid(True)
    plt.legend()
    
    # Velocity space: state 1 velocity vs state 2 velocity.
    plt.subplot(1, 3, 2)
    plt.plot(states[:, 0, 1], states[:, 1, 1])
    plt.plot(states[0, 0, 1], states[0, 1, 1], 'go', label='Start')
    plt.plot(states[-1, 0, 1], states[-1, 1, 1], 'ro', label='End')
    plt.title('Velocity Space')
    plt.xlabel('v₁')
    plt.ylabel('v₂')
    plt.grid(True)
    plt.legend()
    
    # Position-velocity phase portrait for the first state only.
    plt.subplot(1, 3, 3)
    plt.plot(states[:, 0, 0], states[:, 0, 1])
    plt.plot(states[0, 0, 0], states[0, 0, 1], 'go', label='Start')
    plt.plot(states[-1, 0, 0], states[-1, 0, 1], 'ro', label='End')
    plt.title('Phase Space (State 1)')
    plt.xlabel('x₁')
    plt.ylabel('v₁')
    plt.grid(True)
    plt.legend()
    
    plt.tight_layout()
    plt.savefig(save_dir / 'phase_space.png')
    plt.close()
    
    # 2. Time evolution plots: each order over time, plus norms.
    plt.figure(figsize=(15, 10))
    time = np.arange(len(states))
    
    # Position evolution (order 0).
    plt.subplot(2, 2, 1)
    for i in range(states.shape[1]):
        plt.plot(time, states[:, i, 0], label=f'x{i+1}')
    plt.title('Position Evolution')
    plt.xlabel('Time Step')
    plt.ylabel('Position')
    plt.legend()
    plt.grid(True)
    
    # Velocity evolution (order 1).
    plt.subplot(2, 2, 2)
    for i in range(states.shape[1]):
        plt.plot(time, states[:, i, 1], label=f'v{i+1}')
    plt.title('Velocity Evolution')
    plt.xlabel('Time Step')
    plt.ylabel('Velocity')
    plt.legend()
    plt.grid(True)
    
    # Acceleration evolution (order 2).
    plt.subplot(2, 2, 3)
    for i in range(states.shape[1]):
        plt.plot(time, states[:, i, 2], label=f'a{i+1}')
    plt.title('Acceleration Evolution')
    plt.xlabel('Time Step')
    plt.ylabel('Acceleration')
    plt.legend()
    plt.grid(True)
    
    # Euclidean norm of each order across states, over time.
    plt.subplot(2, 2, 4)
    pos_norm = np.linalg.norm(states[:, :, 0], axis=1)
    vel_norm = np.linalg.norm(states[:, :, 1], axis=1)
    acc_norm = np.linalg.norm(states[:, :, 2], axis=1)
    plt.plot(time, pos_norm, label='Position')
    plt.plot(time, vel_norm, label='Velocity')
    plt.plot(time, acc_norm, label='Acceleration')
    plt.title('State Magnitude Evolution')
    plt.xlabel('Time Step')
    plt.ylabel('Norm')
    plt.legend()
    plt.grid(True)
    
    plt.tight_layout()
    plt.savefig(save_dir / 'time_evolution.png')
    plt.close()
    
    # 3. Correlation analysis: scatter between pairs of orders.
    plt.figure(figsize=(15, 5))
    
    # Position-velocity correlation.
    plt.subplot(1, 3, 1)
    for i in range(states.shape[1]):
        plt.scatter(states[:, i, 0], states[:, i, 1], alpha=0.5, label=f'State {i+1}')
    plt.title('Position-Velocity Correlation')
    plt.xlabel('Position')
    plt.ylabel('Velocity')
    plt.legend()
    plt.grid(True)
    
    # Velocity-acceleration correlation.
    plt.subplot(1, 3, 2)
    for i in range(states.shape[1]):
        plt.scatter(states[:, i, 1], states[:, i, 2], alpha=0.5, label=f'State {i+1}')
    plt.title('Velocity-Acceleration Correlation')
    plt.xlabel('Velocity')
    plt.ylabel('Acceleration')
    plt.legend()
    plt.grid(True)
    
    # Position-acceleration correlation.
    plt.subplot(1, 3, 3)
    for i in range(states.shape[1]):
        plt.scatter(states[:, i, 0], states[:, i, 2], alpha=0.5, label=f'State {i+1}')
    plt.title('Position-Acceleration Correlation')
    plt.xlabel('Position')
    plt.ylabel('Acceleration')
    plt.legend()
    plt.grid(True)
    
    plt.tight_layout()
    plt.savefig(save_dir / 'state_correlations.png')
    plt.close()
    
    # 4. Taylor expansion analysis: compare first-order extrapolations
    # (x + v*dt, v + a*dt) against the next recorded step.
    if len(states) > 1:
        plt.figure(figsize=(15, 5))
        # NOTE(review): dt=1 here, but ContinuousState defaults dt=0.01
        # elsewhere — confirm unit time steps are intended for this plot.
        dt = 1  # Assuming unit time steps
        
        # Position prediction error.
        plt.subplot(1, 3, 1)
        pred_pos = states[:-1, :, 0] + states[:-1, :, 1] * dt
        actual_pos = states[1:, :, 0]
        pos_error = np.linalg.norm(pred_pos - actual_pos, axis=1)
        plt.plot(pos_error)
        plt.title('Position Prediction Error')
        plt.xlabel('Time Step')
        plt.ylabel('Error')
        plt.grid(True)
        
        # Velocity prediction error.
        plt.subplot(1, 3, 2)
        pred_vel = states[:-1, :, 1] + states[:-1, :, 2] * dt
        actual_vel = states[1:, :, 1]
        vel_error = np.linalg.norm(pred_vel - actual_vel, axis=1)
        plt.plot(vel_error)
        plt.title('Velocity Prediction Error')
        plt.xlabel('Time Step')
        plt.ylabel('Error')
        plt.grid(True)
        
        # Combined (position + velocity) prediction error.
        plt.subplot(1, 3, 3)
        total_error = pos_error + vel_error
        plt.plot(total_error)
        plt.title('Total Prediction Error')
        plt.xlabel('Time Step')
        plt.ylabel('Error')
        plt.grid(True)
        
        plt.tight_layout()
        plt.savefig(save_dir / 'prediction_analysis.png')
        plt.close()
 | |
| 
 | |
def plot_energy_conservation(data: dict, save_dir: Path):
    """Plot energy conservation diagnostics.

    Expects a 1-D series in ``data['energy_history']``; optional
    'kinetic_energy' / 'potential_energy' series add component plots.
    Writes 'energy_conservation.png' and 'energy_analysis.png'.
    """
    plt.figure(figsize=(15, 10))
    
    energy = np.array(data['energy_history'])
    # NOTE(review): normalization divides by the initial value — this
    # produces inf/nan if energy[0] is 0.
    energy_normalized = energy / energy[0]
    
    # Energy components (total, and KE/PE when available).
    plt.subplot(2, 2, 1)
    plt.plot(energy, label='Total')
    if 'kinetic_energy' in data and 'potential_energy' in data:
        ke = np.array(data['kinetic_energy'])
        pe = np.array(data['potential_energy'])
        plt.plot(ke, label='Kinetic', alpha=0.7)
        plt.plot(pe, label='Potential', alpha=0.7)
        plt.plot(ke + pe, '--', label='Sum', alpha=0.7)
    plt.title('Energy Components')
    plt.xlabel('Time Step')
    plt.ylabel('Energy')
    plt.legend()
    plt.grid(True)
    
    # Normalized energy: perfectly conserved dynamics stay at 1.0.
    plt.subplot(2, 2, 2)
    plt.plot(energy_normalized)
    plt.axhline(y=1.0, color='r', linestyle='--', label='Initial Energy')
    plt.title('Normalized Total Energy')
    plt.xlabel('Time Step')
    plt.ylabel('E/E₀')
    plt.legend()
    plt.grid(True)
    
    # KE/PE ratio and its distribution (only when both series exist).
    if 'kinetic_energy' in data and 'potential_energy' in data:
        plt.subplot(2, 2, 3)
        # NOTE(review): element-wise division — a zero potential energy at
        # any step yields inf here.
        energy_ratio = np.array(data['kinetic_energy']) / np.array(data['potential_energy'])
        plt.plot(energy_ratio)
        plt.axhline(y=1.0, color='r', linestyle='--', label='Equipartition')
        plt.title('Kinetic/Potential Energy Ratio')
        plt.xlabel('Time Step')
        plt.ylabel('KE/PE')
        plt.legend()
        plt.grid(True)
        
        # Histogram of the KE/PE ratio.
        plt.subplot(2, 2, 4)
        plt.hist(energy_ratio, bins=30, density=True, alpha=0.7, label='KE/PE')
        plt.axvline(x=1.0, color='r', linestyle='--', label='Equipartition')
        plt.title('Energy Ratio Distribution')
        plt.xlabel('KE/PE')
        plt.ylabel('Density')
        plt.legend()
        plt.grid(True)
    
    plt.tight_layout()
    plt.savefig(save_dir / 'energy_conservation.png')
    plt.close()
    
    # Second figure: fluctuations, running statistics, conservation metric.
    plt.figure(figsize=(15, 5))
    
    # Step-to-step fluctuations of the normalized energy.
    plt.subplot(1, 3, 1)
    energy_fluct = np.diff(energy_normalized)
    plt.hist(energy_fluct, bins=30, density=True)
    plt.title('Energy Fluctuations')
    plt.xlabel('ΔE/E₀')
    plt.ylabel('Density')
    plt.grid(True)
    
    # Running mean ± std of normalized energy over a sliding window.
    plt.subplot(1, 3, 2)
    window = min(20, len(energy))
    running_mean = np.convolve(energy_normalized, np.ones(window)/window, mode='valid')
    running_std = np.array([np.std(energy_normalized[i:i+window]) 
                          for i in range(len(energy_normalized)-window+1)])
    plt.plot(running_mean, label='Mean')
    plt.fill_between(np.arange(len(running_mean)),
                    running_mean - running_std,
                    running_mean + running_std,
                    alpha=0.2)
    plt.axhline(y=1.0, color='r', linestyle='--', label='Initial')
    plt.title(f'Running Statistics (window={window})')
    plt.xlabel('Time Step')
    plt.ylabel('E/E₀')
    plt.legend()
    plt.grid(True)
    
    # Conservation metric: absolute deviation of E/E₀ from 1.
    plt.subplot(1, 3, 3)
    conservation_metric = np.abs(energy_normalized - 1.0)
    plt.plot(conservation_metric)
    plt.title('Energy Conservation Metric')
    plt.xlabel('Time Step')
    plt.ylabel('|E/E₀ - 1|')
    plt.grid(True)
    
    plt.tight_layout()
    plt.savefig(save_dir / 'energy_analysis.png')
    plt.close()
 | |
| 
 | |
def plot_prediction_diagnostics(data: dict, save_dir: Path):
    """Render prediction-error diagnostics to ``prediction_error.png``.

    Top panel: the raw prediction-error trace from
    ``data['prediction_error']``. Bottom panel: its moving average over a
    window of at most 20 steps.
    """
    error_trace = np.array(data['prediction_error'])

    plt.figure(figsize=(10, 6))

    # Raw error trace.
    plt.subplot(2, 1, 1)
    plt.plot(error_trace)
    plt.title('Prediction Error Evolution')
    plt.grid(True)

    # Smoothed trace: moving average via convolution with a box kernel.
    plt.subplot(2, 1, 2)
    win = min(20, len(error_trace))
    smoothed = np.convolve(error_trace, np.ones(win) / win, mode='valid')
    plt.plot(smoothed)
    plt.title(f'Running Average Error (window={win})')
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(save_dir / 'prediction_error.png')
    plt.close()
 | |
| 
 | |
def create_animation(data: dict, save_dir: Path, name: str = 'animation'):
    """Create a GIF animation of the system dynamics.

    Expects ``data['state_history']`` with shape (time, n_states,
    n_orders>=2); optional energy series add an energy panel. Saves
    ``{name}.gif`` in ``save_dir`` using the 'pillow' writer.
    """
    states = np.array(data['state_history'])
    
    # Figure layout: phase space on the top row, position / velocity /
    # energy panels on the bottom row.
    fig = plt.figure(figsize=(15, 10))
    gs = plt.GridSpec(2, 3, figure=fig)
    
    # Phase space plot
    ax1 = fig.add_subplot(gs[0, :])
    # Position space plot
    ax2 = fig.add_subplot(gs[1, 0])
    # Velocity space plot
    ax3 = fig.add_subplot(gs[1, 1])
    # Energy plot
    ax4 = fig.add_subplot(gs[1, 2])
    
    # Initialize the line artists that the animation will update in place.
    phase_line, = ax1.plot([], [], 'b-', alpha=0.5)
    phase_point, = ax1.plot([], [], 'ro')
    pos_lines = [ax2.plot([], [], label=f'x{i+1}')[0] for i in range(states.shape[1])]
    vel_lines = [ax3.plot([], [], label=f'v{i+1}')[0] for i in range(states.shape[1])]
    
    # Energy artists only exist when the corresponding series are present;
    # init()/animate() below rely on the same key checks before touching them.
    if 'energy_history' in data:
        energy = np.array(data['energy_history'])
        energy_line, = ax4.plot([], [], 'g-', label='Total Energy')
        if 'kinetic_energy' in data and 'potential_energy' in data:
            ke_line, = ax4.plot([], [], 'r-', alpha=0.7, label='Kinetic')
            pe_line, = ax4.plot([], [], 'b-', alpha=0.7, label='Potential')
    
    # Static axis decoration.
    ax1.set_title('Phase Space Evolution')
    ax1.set_xlabel('Position')
    ax1.set_ylabel('Velocity')
    ax1.grid(True)
    
    ax2.set_title('Position Evolution')
    ax2.set_xlabel('Time')
    ax2.set_ylabel('Position')
    ax2.grid(True)
    ax2.legend()
    
    ax3.set_title('Velocity Evolution')
    ax3.set_xlabel('Time')
    ax3.set_ylabel('Velocity')
    ax3.grid(True)
    ax3.legend()
    
    ax4.set_title('Energy Evolution')
    ax4.set_xlabel('Time')
    ax4.set_ylabel('Energy')
    ax4.grid(True)
    ax4.legend()
    
    # Fix axis limits up front (with a 0.1 margin) so blitting works with
    # static backgrounds.
    ax1.set_xlim(np.min(states[:, :, 0])-0.1, np.max(states[:, :, 0])+0.1)
    ax1.set_ylim(np.min(states[:, :, 1])-0.1, np.max(states[:, :, 1])+0.1)
    
    time = np.arange(len(states))
    ax2.set_xlim(0, len(states))
    ax2.set_ylim(np.min(states[:, :, 0])-0.1, np.max(states[:, :, 0])+0.1)
    
    ax3.set_xlim(0, len(states))
    ax3.set_ylim(np.min(states[:, :, 1])-0.1, np.max(states[:, :, 1])+0.1)
    
    if 'energy_history' in data:
        ax4.set_xlim(0, len(energy))
        ax4.set_ylim(np.min(energy)-0.1, np.max(energy)+0.1)
    
    def init():
        # Clear all artists before the first frame.
        phase_line.set_data([], [])
        phase_point.set_data([], [])
        for line in pos_lines + vel_lines:
            line.set_data([], [])
        if 'energy_history' in data:
            energy_line.set_data([], [])
            if 'kinetic_energy' in data:
                ke_line.set_data([], [])
                pe_line.set_data([], [])
        # NOTE(review): the energy artists are updated but not included in
        # the returned tuple — with blit=True, blitted backends may not
        # redraw them. Confirm intended.
        return (phase_line, phase_point, *pos_lines, *vel_lines)
    
    def animate(i):
        # Grow the phase-space trace of state 1 up to frame i and move the
        # current-position marker.
        phase_line.set_data(states[:i+1, 0, 0], states[:i+1, 0, 1])
        phase_point.set_data([states[i, 0, 0]], [states[i, 0, 1]])
        
        # Extend each state's position and velocity traces to frame i.
        for j, (pos_line, vel_line) in enumerate(zip(pos_lines, vel_lines)):
            pos_line.set_data(time[:i+1], states[:i+1, j, 0])
            vel_line.set_data(time[:i+1], states[:i+1, j, 1])
        
        # Extend the energy traces when available.
        if 'energy_history' in data:
            energy_line.set_data(time[:i+1], energy[:i+1])
            if 'kinetic_energy' in data:
                ke = data['kinetic_energy']
                pe = data['potential_energy']
                ke_line.set_data(time[:i+1], ke[:i+1])
                pe_line.set_data(time[:i+1], pe[:i+1])
        
        # NOTE(review): energy artists omitted here as well (see init()).
        return (phase_line, phase_point, *pos_lines, *vel_lines)
    
    # One frame per recorded time step, 50 ms apart, blitted for speed.
    anim = animation.FuncAnimation(fig, animate, init_func=init,
                                 frames=len(states), interval=50, blit=True)
    
    # Save as GIF via the pillow writer.
    anim.save(save_dir / f'{name}.gif', writer='pillow')
    plt.close()
 | |
| 
 | |
| @pytest.mark.basic
 | |
| class TestBasicProperties:
 | |
|     """Basic initialization and property tests."""
 | |
|     
 | |
|     def test_initialization(self):
 | |
|         """Test basic initialization."""
 | |
|         state = ContinuousState(
 | |
|             belief_means=np.zeros((2, 3)),
 | |
|             belief_precisions=np.ones((2, 3))
 | |
|         )
 | |
|         assert state.time == 0.0
 | |
|         assert state.dt == 0.01
 | |
|         
 | |
|         # Test initialization with different parameters
 | |
|         state2 = ContinuousState(
 | |
|             belief_means=np.ones((2, 3)),
 | |
|             belief_precisions=2 * np.ones((2, 3)),
 | |
|             dt=0.001
 | |
|         )
 | |
|         assert state2.dt == 0.001
 | |
|         np.testing.assert_array_equal(state2.belief_means, np.ones((2, 3)))
 | |
|         
 | |
    def test_shift_operator(self):
        """Test shift operator properties.

        Checks the action of the agent's generalized-coordinate operator
        ``D`` on simple 3-vectors, then saves the operator and the last
        input/output pair as diagnostics.
        """
        agent = ContinuousActiveInference(n_states=2, n_obs=2, n_orders=3)
        D = agent.D
        
        # A pure position (no higher orders) should map to zero.
        x = np.array([1.0, 0.0, 0.0])
        Dx = D @ x
        np.testing.assert_array_equal(Dx, [0.0, 0.0, 0.0])
        
        # Linearly increasing orders. NOTE(review): a plain shift operator
        # would give [2.0, 3.0, 0.0]; the expected [2.0, 6.0, 0.0] implies
        # D also scales components — confirm against
        # ContinuousActiveInference's definition of D.
        x = np.array([1.0, 2.0, 3.0])
        Dx = D @ x
        np.testing.assert_array_almost_equal(Dx, [2.0, 6.0, 0.0])
        
        # Persist operator and the second input/output pair for inspection.
        save_test_diagnostics('shift_operator', {
            'operator': D,
            'input': x,
            'output': Dx
        })
 | |
|         
 | |
    def test_sensory_mapping(self):
        """Test basic sensory mapping.

        Pushes position vectors of varying magnitude and sign through
        ``agent._sensory_mapping`` and checks that the mapping behaves
        approximately linearly: it preserves signs, keeps magnitudes within
        a [0.3x, 3x] band of the input, and uses a scale factor that is
        consistent (within 15%) across input magnitudes.
        """
        agent = ContinuousActiveInference(n_states=2, n_obs=2, n_orders=3)
        
        # Test cases with different magnitudes
        test_cases = [
            np.array([0.1, 0.2]),    # Small values
            np.array([0.01, 0.02]),  # Very small values
            np.array([0.0, 0.0]),    # Zero
            np.array([-0.1, 0.1]),   # Mixed signs
            np.array([1.0, 1.0])     # Unit values
        ]
        
        results = []
        for positions in test_cases:
            # Initialize full generalized-coordinate state with zeros
            # (columns: position, velocity, acceleration).
            states = np.zeros((2, 3))
            states[:, 0] = positions  # Set only positions
            
            # Get observations
            obs = agent._sensory_mapping(states)
            
            # Store results for analysis; 'ratio' is None for the all-zero
            # input to avoid a division by zero.
            results.append({
                'input': positions,
                'output': obs,
                'ratio': obs / positions if not np.allclose(positions, 0) else None
            })
            
            # Basic checks that should always hold
            if not np.allclose(positions, 0):
                # Check sign preservation
                np.testing.assert_array_equal(np.sign(obs), np.sign(positions))
                
                # Check magnitude relationship.
                # Observations should scale approximately linearly with the
                # input; 'scale' is the empirical per-case gain.
                scale = np.mean(np.abs(obs) / np.abs(positions))
                np.testing.assert_allclose(
                    np.abs(obs),
                    np.abs(positions) * scale,
                    rtol=0.15,  # 15% relative tolerance
                    atol=1e-3   # Increased absolute tolerance
                )
                
                # Check that the mapping preserves reasonable bounds
                assert np.all(np.abs(obs) <= 3 * np.abs(positions)), \
                    "Observations should not grow unreasonably large"
                assert np.all(np.abs(obs) >= 0.3 * np.abs(positions)), \
                    "Observations should not shrink unreasonably small"
        
        # Analyze scaling consistency across different magnitudes
        non_zero_cases = [r for r in results if r['ratio'] is not None]
        if non_zero_cases:
            # Calculate average scaling factor for each test case
            scales = []
            for r in non_zero_cases:
                pos = np.array(r['input'])
                obs = np.array(r['output'])
                scale = np.mean(np.abs(obs) / np.abs(pos))
                scales.append(scale)
            
            # Check that scaling factors are consistent across magnitudes
            scales = np.array(scales)
            np.testing.assert_allclose(
                scales,
                np.mean(scales),
                rtol=0.15,  # 15% relative tolerance
                atol=1e-3   # Increased absolute tolerance
            )
        
        # Save detailed diagnostics. Note: 'mean_scale'/'scale_std' read
        # `scales`, which only exists when non_zero_cases is truthy; the
        # conditional expressions guard that.
        save_test_diagnostics('sensory_mapping', {
            'test_cases': test_cases,
            'results': [{
                'input': r['input'].tolist(),
                'output': r['output'].tolist(),
                'ratio': r['ratio'].tolist() if r['ratio'] is not None else None,
                'scale_factor': np.mean(np.abs(r['output']) / np.abs(r['input'])) 
                    if r['ratio'] is not None else None
            } for r in results],
            'mean_scale': float(np.mean(scales)) if non_zero_cases else None,
            'scale_std': float(np.std(scales)) if non_zero_cases else None
        })
 | |
| 
 | |
@pytest.mark.single_step
class TestSingleStepDynamics:
    """Single-step dynamics tests."""
    
    @pytest.fixture
    def agent(self):
        """Create a small two-state test agent with a slow timescale."""
        return ContinuousActiveInference(
            n_states=2,
            n_obs=2,
            n_orders=3,
            dt=0.0001,
            alpha=0.001
        )
    
    def test_taylor_expansion_step(self, agent):
        """Test single-step Taylor expansion.

        Seeds the belief state with small position/velocity/acceleration
        values and checks that one agent step matches the second-order
        Taylor prediction for position and the first-order one for velocity.
        """
        # Initialize with small values
        x0 = np.array([0.01, 0.02])
        v0 = np.array([0.005, -0.005])
        a0 = np.array([-0.001, 0.001])
        
        agent.state.belief_means[:, 0] = x0
        agent.state.belief_means[:, 1] = v0
        agent.state.belief_means[:, 2] = a0
        
        # Snapshot the beliefs BEFORE stepping.
        # Bug fix: previously this copy was taken after agent.step(), so the
        # diagnostics recorded the final state under both keys.
        initial_state = agent.state.belief_means.copy()
        
        # Predict next state via Taylor expansion
        dt = agent.dt
        x_pred = x0 + v0 * dt + 0.5 * a0 * dt**2
        v_pred = v0 + a0 * dt
        
        # Single step driven by the agent's own predicted observation.
        # (A dead `action = np.zeros(...)` assignment was removed; `step`
        # takes only the observation.)
        obs = agent._sensory_mapping(agent.state.belief_means)
        agent.step(obs)
        
        # Compare with predictions
        x_actual = agent.state.belief_means[:, 0]
        v_actual = agent.state.belief_means[:, 1]
        
        # Save diagnostics
        save_test_diagnostics('taylor_step', {
            'initial_state': initial_state,
            'final_state': agent.state.belief_means,
            'predicted_position': x_pred,
            'predicted_velocity': v_pred,
            'dt': agent.dt
        }, 'single_step')
        
        # Verify predictions with appropriate tolerances
        np.testing.assert_allclose(x_actual, x_pred, rtol=1e-4, atol=1e-5)
        np.testing.assert_allclose(v_actual, v_pred, rtol=1e-4, atol=1e-5)
    
    def test_free_energy_computation(self, agent):
        """Test free energy computation for single step.

        Runs a few steps with zero observations and checks every computed
        free-energy value is positive and finite.
        """
        # Set initial state
        agent.state.belief_means[:, 0] = np.array([0.1, -0.1])
        agent.state.belief_means[:, 1] = np.zeros_like(agent.state.belief_means[:, 0])
        agent.state.belief_means[:, 2] = np.zeros_like(agent.state.belief_means[:, 0])
        
        # Track free energy over multiple steps
        free_energy_history = []
        for _ in range(5):  # Take a few steps to get a history
            obs = np.zeros_like(agent.state.belief_means[:, 0])
            F = agent._compute_free_energy(obs, agent.state.belief_means)
            free_energy_history.append(float(F))  # Convert to plain float
            agent.step(obs)
        
        # Free energy should be positive and finite
        assert all(F > 0 for F in free_energy_history)
        assert all(np.isfinite(F) for F in free_energy_history)
        
        # `obs` here is the (zero) observation from the last loop iteration.
        save_test_diagnostics('free_energy', {
            'state': agent.state.belief_means,
            'observation': obs,
            'free_energy': np.array(free_energy_history)  # Ensure 1D array
        }, 'single_step')
| 
 | |
@pytest.mark.multi_step
class TestMultiStepEvolution:
    """Multi-step evolution tests."""
    
    @pytest.fixture
    def agent(self):
        """Create a small two-state test agent with a slow timescale."""
        return ContinuousActiveInference(
            n_states=2,
            n_obs=2,
            n_orders=3,
            dt=0.0001,
            alpha=0.001
        )
    
    def test_belief_consistency(self, agent):
        """Test belief updating consistency over multiple steps.

        Evolves the agent for a few steps and checks that the velocity
        belief approximates the finite-difference derivative of the
        position belief.
        """
        # Initialize with very small values for better numerical stability
        agent.state.belief_means[:, 0] = np.array([0.001, -0.001])
        agent.state.belief_means[:, 1] = np.array([0.0005, 0.0005])
        agent.state.belief_means[:, 2] = np.array([0.0001, -0.0001])
        
        # Track evolution
        state_history = []
        free_energy_history = []
        prediction_error_history = []
        
        # Observation predicted from the previous iteration's updated beliefs.
        prev_pred_obs = None
        
        # Take smaller steps for better numerical accuracy
        for _ in range(10):  # Reduced steps
            obs = agent._sensory_mapping(agent.state.belief_means)
            # Bug fix: previously obs and pred_obs were computed from the
            # identical state, so the prediction error was always exactly
            # zero. Compare instead against the one-step-ahead prediction
            # made from the previous (post-update) belief state.
            if prev_pred_obs is None:
                prediction_error = 0.0
            else:
                prediction_error = np.mean((obs - prev_pred_obs)**2)
            
            action, F = agent.step(obs)
            prev_pred_obs = agent._sensory_mapping(agent.state.belief_means)
            
            state_history.append(agent.state.belief_means.copy())
            free_energy_history.append(F)
            prediction_error_history.append(prediction_error)
        
        save_test_diagnostics('belief_evolution', {
            'state_history': np.array(state_history),
            'free_energy': np.array(free_energy_history),
            'prediction_error': np.array(prediction_error_history)
        }, 'multi_step')
        
        # Verify consistency: the mean of consecutive velocity beliefs should
        # approximate the position finite difference. Lenient tolerances are
        # used for numerical stability.
        state_history = np.array(state_history)
        for t in range(1, len(state_history)):
            dx = (state_history[t, :, 0] - state_history[t-1, :, 0]) / agent.dt
            v_avg = 0.5 * (state_history[t, :, 1] + state_history[t-1, :, 1])
            np.testing.assert_allclose(dx, v_avg, rtol=0.5, atol=1e-2)
    
    def test_convergence(self, agent):
        """Test convergence toward a target state at the origin."""
        # Set initial state away from target
        x0 = np.array([0.1, -0.1])
        agent.state.belief_means[:, 0] = x0
        agent.state.belief_means[:, 1] = np.zeros_like(x0)
        agent.state.belief_means[:, 2] = np.zeros_like(x0)
        
        # Target at origin
        target = np.zeros_like(x0)
        
        # Track evolution
        state_history = []
        distance_history = []
        
        for _ in range(50):
            obs = target  # Observation is the target
            action, _ = agent.step(obs)
            
            state_history.append(agent.state.belief_means.copy())
            distance = np.linalg.norm(agent.state.belief_means[:, 0] - target)
            distance_history.append(distance)
        
        save_test_diagnostics('convergence', {
            'state_history': np.array(state_history),
            'distance_history': np.array(distance_history),
            'target': target
        }, 'multi_step')
        
        # Verify the agent ends closer to the target than it started
        assert distance_history[-1] < distance_history[0]
| 
 | |
@pytest.mark.complex
class TestComplexDynamics:
    """Tests for complex dynamical behaviors."""
    
    @pytest.fixture
    def agent(self):
        """Create a small two-state test agent with a slow timescale."""
        return ContinuousActiveInference(
            n_states=2,
            n_obs=2,
            n_orders=3,
            dt=0.0001,
            alpha=0.001
        )
    
    def test_harmonic_motion(self, agent):
        """Test harmonic motion with minimal configuration.

        Evolves the agent from a small oscillatory initial condition and
        checks that the total (kinetic + potential) energy stays roughly
        constant relative to its initial value.
        """
        # Initialize with small harmonic motion
        omega = 0.1  # natural frequency used only for the potential-energy term
        agent.state.belief_means[:, 0] = np.array([0.01, 0.0])
        agent.state.belief_means[:, 1] = np.array([0.0, 0.005])
        agent.state.belief_means[:, 2] = np.array([-0.0005, 0.0])
        
        # Track evolution
        state_history = []
        energy_history = []
        ke_history = []
        pe_history = []
        
        for t in range(200):  # Increased steps
            # Self-generated observation: the agent observes its own
            # predicted sensory state (no external input).
            # (Dead `action = np.zeros(...)` assignment removed; `step`
            # takes only the observation.)
            obs = agent._sensory_mapping(agent.state.belief_means)
            agent.step(obs)
            
            if t % 2 == 0:  # Record every other step
                state_history.append(agent.state.belief_means.copy())
                ke = 0.5 * (agent.state.belief_means[:, 1]**2).sum()
                pe = 0.5 * omega**2 * (agent.state.belief_means[:, 0]**2).sum()
                ke_history.append(ke)
                pe_history.append(pe)
                energy_history.append(ke + pe)
        
        save_test_diagnostics('harmonic_motion', {
            'state_history': np.array(state_history),
            'energy_history': np.array(energy_history),
            'kinetic_energy': np.array(ke_history),
            'potential_energy': np.array(pe_history),
            'omega': omega
        }, 'complex')
        
        # Verify approximate energy conservation
        energy_normalized = np.array(energy_history) / energy_history[0]
        assert np.std(energy_normalized) < 0.3
    
    def test_driven_oscillator(self, agent):
        """Test response to periodic driving force.

        NOTE(review): `agent.step` accepts only an observation, so the
        drive computed below is recorded in the diagnostics but is never
        injected into the agent's dynamics (the original code assigned it
        to an unused `action` variable). Confirm whether the drive should
        instead perturb the observation or the agent state.
        """
        # Initialize at rest
        agent.state.belief_means[:, 0] = np.zeros(2)
        agent.state.belief_means[:, 1] = np.zeros(2)
        agent.state.belief_means[:, 2] = np.zeros(2)
        
        # Driving parameters
        drive_freq = 0.2
        amplitude = 0.001
        
        # Track evolution
        state_history = []
        drive_history = []
        
        for t in range(300):  # Long enough to see resonance
            time = t * agent.dt
            # Periodic driving force (recorded only; see NOTE in docstring)
            drive = amplitude * np.array([np.sin(drive_freq * time), 0.0])
            
            obs = agent._sensory_mapping(agent.state.belief_means)
            agent.step(obs)
            
            if t % 2 == 0:
                state_history.append(agent.state.belief_means.copy())
                drive_history.append(drive.copy())
        
        save_test_diagnostics('driven_oscillator', {
            'state_history': np.array(state_history),
            'drive_history': np.array(drive_history),
            'drive_frequency': drive_freq,
            'drive_amplitude': amplitude
        }, 'complex')
| 
 | |
@pytest.mark.complex
class TestGeneralizedCoordinates:
    """Tests for generalized coordinates properties."""
    
    @pytest.fixture
    def agent(self):
        """Create a small two-state test agent with a slow timescale."""
        return ContinuousActiveInference(
            n_states=2,
            n_obs=2,
            n_orders=3,
            dt=0.0001,
            alpha=0.001
        )
    
    def test_generalized_coordinates_consistency(self, agent):
        """Test consistency of generalized coordinates evolution.

        Evolves the agent for 30 steps, then checks that the velocity and
        acceleration beliefs agree with 4th-order central finite differences
        of the recorded position and velocity histories. Also saves an
        animation and several diagnostic plots.
        """
        # Initialize with smaller oscillatory motion for better numerical stability
        agent.state.belief_means[:, 0] = np.array([0.005, 0.0])    # Position (reduced magnitude)
        agent.state.belief_means[:, 1] = np.array([0.0, 0.005])    # Velocity (reduced magnitude)
        agent.state.belief_means[:, 2] = np.array([-0.005, 0.0])   # Acceleration (reduced magnitude)
        
        # Track evolution
        state_history = []
        derivatives = []
        
        # First, collect all states
        for t in range(30):  # Reduced number of steps
            # Take step with reduced noise
            obs = agent._sensory_mapping(agent.state.belief_means)
            obs += np.random.normal(0, 1e-5, obs.shape)  # Very small observation noise
            agent.step(obs)
            state_history.append(agent.state.belief_means.copy())
        
        # Then calculate derivatives with all states available
        state_history = np.array(state_history)  # Convert to numpy array for easier indexing
        dt = agent.dt
        
        for t in range(2, len(state_history)-2):  # Start at 2 and end 2 before the end for 4th order
            pos = state_history[t, :, 0]
            vel = state_history[t, :, 1]
            acc = state_history[t, :, 2]
            
            # 4th order central difference coefficients:
            # f'(t) ~ [f(t-2) - 8 f(t-1) + 8 f(t+1) - f(t+2)] / (12 dt)
            c1 = 1.0 / 12.0
            c2 = -2.0 / 3.0
            c3 = 2.0 / 3.0
            c4 = -1.0 / 12.0
            
            # Velocity computation (4th order): numerical derivative of position
            num_vel = (
                c1 * state_history[t-2, :, 0] +
                c2 * state_history[t-1, :, 0] +
                c3 * state_history[t+1, :, 0] +
                c4 * state_history[t+2, :, 0]
            ) / dt
            
            # Acceleration computation (4th order): numerical derivative of velocity
            num_acc = (
                c1 * state_history[t-2, :, 1] +
                c2 * state_history[t-1, :, 1] +
                c3 * state_history[t+1, :, 1] +
                c4 * state_history[t+2, :, 1]
            ) / dt
            
            derivatives.append({
                'velocity': vel,
                'num_velocity': num_vel,
                'acceleration': acc,
                'num_acceleration': num_acc,
                'time': t * dt
            })
        
        # Save diagnostics with animation
        diagnostic_data = {
            'state_history': state_history,
            'derivatives': derivatives,
            'dt': agent.dt
        }
        save_dir = TEST_OUTPUT_DIR / 'complex' / 'generalized_coordinates'
        save_dir.mkdir(parents=True, exist_ok=True)
        
        # Create animation
        create_animation(diagnostic_data, save_dir, 'generalized_coordinates')
        
        # Plot derivative comparisons (2x2 grid: values and error magnitudes)
        plt.figure(figsize=(15, 10))
        
        # Velocity comparison: belief velocity vs numerical derivative
        plt.subplot(2, 2, 1)
        times = [d['time'] for d in derivatives]
        vel_actual = np.array([d['velocity'] for d in derivatives])
        vel_num = np.array([d['num_velocity'] for d in derivatives])
        
        for i in range(vel_actual.shape[1]):
            plt.plot(times, vel_actual[:, i], 'b-', label=f'Actual Vel {i+1}')
            plt.plot(times, vel_num[:, i], 'r--', label=f'Numerical Vel {i+1}')
        plt.title('Velocity Comparison')
        plt.xlabel('Time')
        plt.ylabel('Velocity')
        plt.legend()
        plt.grid(True)
        
        # Acceleration comparison: belief acceleration vs numerical derivative
        plt.subplot(2, 2, 2)
        acc_actual = np.array([d['acceleration'] for d in derivatives])
        acc_num = np.array([d['num_acceleration'] for d in derivatives])
        
        for i in range(acc_actual.shape[1]):
            plt.plot(times, acc_actual[:, i], 'b-', label=f'Actual Acc {i+1}')
            plt.plot(times, acc_num[:, i], 'r--', label=f'Numerical Acc {i+1}')
        plt.title('Acceleration Comparison')
        plt.xlabel('Time')
        plt.ylabel('Acceleration')
        plt.legend()
        plt.grid(True)
        
        # Velocity error (Euclidean norm across state dimensions)
        plt.subplot(2, 2, 3)
        vel_error = np.linalg.norm(vel_actual - vel_num, axis=1)
        plt.plot(times, vel_error)
        plt.title('Velocity Error')
        plt.xlabel('Time')
        plt.ylabel('Error Magnitude')
        plt.grid(True)
        
        # Acceleration error (Euclidean norm across state dimensions)
        plt.subplot(2, 2, 4)
        acc_error = np.linalg.norm(acc_actual - acc_num, axis=1)
        plt.plot(times, acc_error)
        plt.title('Acceleration Error')
        plt.xlabel('Time')
        plt.ylabel('Error Magnitude')
        plt.grid(True)
        
        plt.tight_layout()
        plt.savefig(save_dir / 'derivative_analysis.png')
        plt.close()
        
        # Additional analysis plots (1x3 grid)
        plt.figure(figsize=(15, 5))
        
        # Relative error distribution; the 1e-10 guards against division by zero
        plt.subplot(1, 3, 1)
        rel_vel_error = np.abs((vel_actual - vel_num) / (np.abs(vel_actual) + 1e-10))
        plt.hist(rel_vel_error.flatten(), bins=30, alpha=0.5, label='Velocity')
        rel_acc_error = np.abs((acc_actual - acc_num) / (np.abs(acc_actual) + 1e-10))
        plt.hist(rel_acc_error.flatten(), bins=30, alpha=0.5, label='Acceleration')
        plt.title('Relative Error Distribution')
        plt.xlabel('Relative Error')
        plt.ylabel('Count')
        plt.legend()
        plt.grid(True)
        
        # Error correlation between velocity and acceleration errors
        plt.subplot(1, 3, 2)
        plt.scatter(vel_error, acc_error, alpha=0.5)
        plt.title('Velocity vs Acceleration Error')
        plt.xlabel('Velocity Error')
        plt.ylabel('Acceleration Error')
        plt.grid(True)
        
        # Running error average (moving mean over a small window)
        plt.subplot(1, 3, 3)
        window = min(5, len(vel_error))
        vel_running = np.convolve(vel_error, np.ones(window)/window, mode='valid')
        acc_running = np.convolve(acc_error, np.ones(window)/window, mode='valid')
        plt.plot(vel_running, label='Velocity')
        plt.plot(acc_running, label='Acceleration')
        plt.title(f'Running Average Error (window={window})')
        plt.xlabel('Time Step')
        plt.ylabel('Error')
        plt.legend()
        plt.grid(True)
        
        plt.tight_layout()
        plt.savefig(save_dir / 'error_analysis.png')
        plt.close()
        
        # Verify consistency between actual and numerical derivatives.
        # Use more lenient tolerances and focus on the stable middle region.
        stable_start = 2  # Skip initial transients
        stable_end = min(10, len(derivatives))  # Use middle region
        
        for t in range(stable_start, stable_end):
            # Velocity consistency with higher tolerance
            np.testing.assert_allclose(
                derivatives[t]['velocity'],
                derivatives[t]['num_velocity'],
                rtol=0.5,  # 50% relative tolerance
                atol=0.05  # Increased absolute tolerance
            )
            
            # Acceleration consistency with higher tolerance
            np.testing.assert_allclose(
                derivatives[t]['acceleration'],
                derivatives[t]['num_acceleration'],
                rtol=0.5,  # 50% relative tolerance
                atol=0.05  # Increased absolute tolerance
            )
    
    def test_taylor_expansion_prediction(self, agent):
        """Test prediction accuracy using Taylor expansion.

        At each step, predicts the next position/velocity from the current
        beliefs via a second-order Taylor expansion, then steps the agent
        and records the realized beliefs. Asserts the mean prediction error
        over the first few steps is small.
        """
        # Initialize with simple motion
        agent.state.belief_means[:, 0] = np.array([0.01, 0.0])
        agent.state.belief_means[:, 1] = np.array([0.005, 0.005])
        agent.state.belief_means[:, 2] = np.array([0.001, -0.001])
        
        # Track predictions and actual values
        predictions = []
        actuals = []
        
        for t in range(20):  # Reduced steps for better accuracy
            # Current state
            x = agent.state.belief_means[:, 0]
            v = agent.state.belief_means[:, 1]
            a = agent.state.belief_means[:, 2]
            
            # Predict future state using Taylor expansion
            dt = agent.dt
            x_pred = x + v * dt + 0.5 * a * dt**2
            v_pred = v + a * dt
            
            # Store prediction
            # NOTE(review): predictions are stamped t*dt while the matching
            # actuals are stamped (t+1)*dt; the plots below use the
            # prediction times for both series — confirm this offset is
            # intended.
            predictions.append({
                'position': x_pred,
                'velocity': v_pred,
                'time': t * dt
            })
            
            # Take step
            obs = agent._sensory_mapping(agent.state.belief_means)
            agent.step(obs)
            
            # Store actual
            actuals.append({
                'position': agent.state.belief_means[:, 0],
                'velocity': agent.state.belief_means[:, 1],
                'time': (t + 1) * dt
            })
        
        # Save diagnostics
        diagnostic_data = {
            'predictions': predictions,
            'actuals': actuals,
            'dt': agent.dt
        }
        save_dir = TEST_OUTPUT_DIR / 'complex' / 'taylor_prediction'
        save_dir.mkdir(parents=True, exist_ok=True)
        
        # Plot detailed prediction analysis (2x2 grid)
        plt.figure(figsize=(15, 10))
        
        # Position prediction vs actual
        plt.subplot(2, 2, 1)
        times = [p['time'] for p in predictions]
        pred_pos = np.array([p['position'] for p in predictions])
        actual_pos = np.array([a['position'] for a in actuals])
        
        for i in range(pred_pos.shape[1]):
            plt.plot(times, pred_pos[:, i], 'b-', label=f'Predicted Pos {i+1}')
            plt.plot(times, actual_pos[:, i], 'r--', label=f'Actual Pos {i+1}')
        plt.title('Position: Prediction vs Actual')
        plt.xlabel('Time')
        plt.ylabel('Position')
        plt.legend()
        plt.grid(True)
        
        # Velocity prediction vs actual
        plt.subplot(2, 2, 2)
        pred_vel = np.array([p['velocity'] for p in predictions])
        actual_vel = np.array([a['velocity'] for a in actuals])
        
        for i in range(pred_vel.shape[1]):
            plt.plot(times, pred_vel[:, i], 'b-', label=f'Predicted Vel {i+1}')
            plt.plot(times, actual_vel[:, i], 'r--', label=f'Actual Vel {i+1}')
        plt.title('Velocity: Prediction vs Actual')
        plt.xlabel('Time')
        plt.ylabel('Velocity')
        plt.legend()
        plt.grid(True)
        
        # Position error (Euclidean norm per step)
        plt.subplot(2, 2, 3)
        pos_errors = np.linalg.norm(pred_pos - actual_pos, axis=1)
        plt.plot(times, pos_errors)
        plt.title('Position Prediction Error')
        plt.xlabel('Time')
        plt.ylabel('Error Magnitude')
        plt.grid(True)
        
        # Velocity error (Euclidean norm per step)
        plt.subplot(2, 2, 4)
        vel_errors = np.linalg.norm(pred_vel - actual_vel, axis=1)
        plt.plot(times, vel_errors)
        plt.title('Velocity Prediction Error')
        plt.xlabel('Time')
        plt.ylabel('Error Magnitude')
        plt.grid(True)
        
        plt.tight_layout()
        plt.savefig(save_dir / 'prediction_analysis.png')
        plt.close()
        
        # Verify prediction accuracy for early time steps
        early_steps = min(5, len(pos_errors))
        assert np.mean(pos_errors[:early_steps]) < 1e-3, "Position prediction error too large"
        assert np.mean(vel_errors[:early_steps]) < 1e-3, "Velocity prediction error too large"
    
    def test_generalized_coordinates_energy(self, agent):
        """Test energy conservation in generalized coordinates.

        Tracks kinetic and potential energies of a unit-frequency harmonic
        oscillator over 50 steps; asserts early-time conservation of total
        energy and approximate equipartition (KE ~ PE).
        """
        # Initialize with conservative oscillator
        agent.state.belief_means[:, 0] = np.array([0.01, 0.0])  # Position
        agent.state.belief_means[:, 1] = np.array([0.0, 0.01])  # Velocity
        agent.state.belief_means[:, 2] = np.array([-0.01, 0.0]) # Acceleration
        
        # Track energies
        energies = []
        state_history = []
        
        for t in range(50):
            # Calculate energies BEFORE stepping (records the pre-step state)
            pos = agent.state.belief_means[:, 0]
            vel = agent.state.belief_means[:, 1]
            
            # Simple harmonic oscillator energies (unit mass, unit frequency)
            ke = 0.5 * np.sum(vel**2)  # Kinetic energy
            pe = 0.5 * np.sum(pos**2)  # Potential energy
            total = ke + pe
            
            energies.append({
                'kinetic': ke,
                'potential': pe,
                'total': total,
                'time': t * agent.dt
            })
            
            state_history.append(agent.state.belief_means.copy())
            
            # Take step
            obs = agent._sensory_mapping(agent.state.belief_means)
            agent.step(obs)
        
        # Save diagnostics
        save_dir = TEST_OUTPUT_DIR / 'complex' / 'energy_conservation'
        save_dir.mkdir(parents=True, exist_ok=True)
        
        # Plot energy analysis (2x2 grid)
        plt.figure(figsize=(15, 10))
        
        # Energy components over time
        plt.subplot(2, 2, 1)
        times = [e['time'] for e in energies]
        ke_vals = [e['kinetic'] for e in energies]
        pe_vals = [e['potential'] for e in energies]
        total_vals = [e['total'] for e in energies]
        
        plt.plot(times, ke_vals, 'r-', label='Kinetic')
        plt.plot(times, pe_vals, 'b-', label='Potential')
        plt.plot(times, total_vals, 'k--', label='Total')
        plt.title('Energy Components')
        plt.xlabel('Time')
        plt.ylabel('Energy')
        plt.legend()
        plt.grid(True)
        
        # Normalized total energy (E/E0; flat line = perfect conservation)
        plt.subplot(2, 2, 2)
        norm_energy = np.array(total_vals) / total_vals[0]
        plt.plot(times, norm_energy)
        plt.axhline(y=1.0, color='r', linestyle='--', label='Initial Energy')
        plt.title('Normalized Total Energy')
        plt.xlabel('Time')
        plt.ylabel('E/E₀')
        plt.legend()
        plt.grid(True)
        
        # Kinetic/potential ratio (1.0 = equipartition)
        plt.subplot(2, 2, 3)
        energy_ratio = np.array(ke_vals) / np.array(pe_vals)
        plt.plot(times, energy_ratio)
        plt.axhline(y=1.0, color='r', linestyle='--', label='Equipartition')
        plt.title('Kinetic/Potential Energy Ratio')
        plt.xlabel('Time')
        plt.ylabel('KE/PE')
        plt.legend()
        plt.grid(True)
        
        # Phase space (first state dim: position vs velocity) with energy contours
        plt.subplot(2, 2, 4)
        states = np.array(state_history)
        plt.plot(states[:, 0, 0], states[:, 0, 1], 'b-', label='Trajectory')
        plt.plot(states[0, 0, 0], states[0, 0, 1], 'go', label='Start')
        plt.plot(states[-1, 0, 0], states[-1, 0, 1], 'ro', label='End')
        
        # Add energy contours (circles of constant E in phase space)
        x = np.linspace(-0.02, 0.02, 100)
        y = np.linspace(-0.02, 0.02, 100)
        X, Y = np.meshgrid(x, y)
        E = 0.5 * (X**2 + Y**2)
        plt.contour(X, Y, E, levels=10, alpha=0.3, colors='k')
        
        plt.title('Phase Space with Energy Contours')
        plt.xlabel('Position')
        plt.ylabel('Velocity')
        plt.legend()
        plt.grid(True)
        
        plt.tight_layout()
        plt.savefig(save_dir / 'energy_analysis.png')
        plt.close()
        
        # Create animation with energy evolution
        diagnostic_data = {
            'state_history': states,
            'energy_history': total_vals,
            'kinetic_energy': ke_vals,
            'potential_energy': pe_vals
        }
        create_animation(diagnostic_data, save_dir, 'energy_evolution')
        
        # Verify energy conservation.
        # Use early time steps for stricter verification.
        early_steps = min(10, len(norm_energy))
        assert np.std(norm_energy[:early_steps]) < 0.1, "Energy not conserved in early evolution"
        assert np.mean(np.abs(energy_ratio[:early_steps] - 1.0)) < 0.2, "Significant deviation from equipartition"
| 
 | |
if __name__ == '__main__':
    # Allow running this suite directly: verbose output, short tracebacks.
    cli_args = [__file__, '-v', '--tb=short']
    pytest.main(cli_args)
