---
type: implementation
id: path_integral_implementations_001
created: 2024-03-15
modified: 2024-03-15
tags:
  - numerical-methods
  - algorithms
  - path-integrals
  - implementation
  - computation
aliases:
  - path-integral-algorithms
  - numerical-path-integrals
complexity: advanced
processing_priority: 1
semantic_relations:
  - type: implements
    links:
      - path_integral_free_energy
  - type: supports
    links:
      - continuous_time_active_inference
---
# Path Integral Implementation Methods

## Numerical Integration Methods

### 1. Symplectic Integration
```python
import numpy as np

class SymplecticPathIntegrator:
    """Symplectic integration for path integrals."""

    def __init__(self, order=4):
        self.order = order
        self.coefficients = self._get_coefficients()

    def _get_coefficients(self):
        """Get drift (c) and kick (d) coefficients for the chosen order."""
        if self.order == 2:
            # Leapfrog / Störmer-Verlet (drift-kick-drift)
            return {'c': [0.5, 0.5], 'd': [1.0]}
        elif self.order == 4:
            # Forest-Ruth coefficients
            w = 1.0 / (2 - 2**(1/3))
            return {
                'c': [w/2, (1-w)/2, (1-w)/2, w/2],
                'd': [w, 1-2*w, w]
            }
        raise ValueError(f"Unsupported order: {self.order}")

    def integrate(self, hamiltonian, initial_state, dt, steps):
        """Integrate Hamilton's equations; state interleaves (q, p) pairs."""
        state = initial_state.copy()
        trajectory = [state.copy()]
        cs, ds = self.coefficients['c'], self.coefficients['d']
        for _ in range(steps):
            # Alternate position (drift) and momentum (kick) sub-steps
            for i, c in enumerate(cs):
                state[::2] += c * dt * hamiltonian.momentum_gradient(state)
                if i < len(ds):
                    state[1::2] -= ds[i] * dt * hamiltonian.position_gradient(state)
            trajectory.append(state.copy())
        return np.array(trajectory)
```
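
A minimal usage sketch (the `HarmonicOscillator` helper below is a hypothetical stand-in, not part of the integrator): integrate a 1-D harmonic oscillator with the interleaved `(q, p)` state layout the integrator expects, and confirm that the energy drift stays small.

```python
import numpy as np

# Hypothetical Hamiltonian for illustration: H(q, p) = p^2/2 + q^2/2
class HarmonicOscillator:
    def momentum_gradient(self, state):
        return state[1::2]   # dH/dp = p (drives the position update)

    def position_gradient(self, state):
        return state[::2]    # dH/dq = q (drives the momentum update)

integrator = SymplecticPathIntegrator(order=4)
trajectory = integrator.integrate(
    HarmonicOscillator(), np.array([1.0, 0.0]), dt=0.01, steps=1000)

# A symplectic scheme should keep the energy (nearly) constant
energies = 0.5 * trajectory[:, 1]**2 + 0.5 * trajectory[:, 0]**2
print(f"max energy drift: {np.max(np.abs(energies - energies[0])):.2e}")
```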
### 2. Stochastic Integration
```python
class StochasticPathIntegrator:
    """Stochastic integration for path integrals."""

    def __init__(self, method='milstein', seed=None):
        self.method = method
        self.rng = np.random.default_rng(seed)

    def integrate_sde(self, drift, diffusion, initial_state, dt, steps,
                      diffusion_derivative=None):
        """Integrate dx = drift(x) dt + diffusion(x) dW.

        The Milstein correction needs the state derivative of the
        diffusion coefficient, passed as `diffusion_derivative`.
        """
        state = initial_state.copy()
        trajectory = [state.copy()]
        for _ in range(steps):
            dW = self.rng.normal(0.0, np.sqrt(dt), size=state.shape)
            if self.method == 'euler':
                # Euler-Maruyama method (strong order 0.5)
                state = state + drift(state)*dt + diffusion(state)*dW
            elif self.method == 'milstein':
                # Milstein method (strong order 1.0)
                if diffusion_derivative is None:
                    raise ValueError("Milstein requires diffusion_derivative")
                state = (state + drift(state)*dt + diffusion(state)*dW +
                         0.5*diffusion(state)*diffusion_derivative(state)*
                         (dW**2 - dt))
            else:
                raise ValueError(f"Unknown method: {self.method}")
            trajectory.append(state.copy())
        return np.array(trajectory)
```
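
A quick sketch integrating an Ornstein-Uhlenbeck process (the `theta` and `sigma` values are illustrative choices, not fixed by the class). With additive (constant) noise the Milstein correction vanishes, so the scheme coincides with Euler-Maruyama here.

```python
import numpy as np

# Ornstein-Uhlenbeck process: dx = -theta*x dt + sigma dW
theta, sigma = 1.0, 0.5
drift = lambda x: -theta * x
diffusion = lambda x: sigma * np.ones_like(x)
diffusion_derivative = lambda x: np.zeros_like(x)  # constant diffusion

integrator = StochasticPathIntegrator(method='milstein', seed=0)
path = integrator.integrate_sde(
    drift, diffusion, np.array([1.0]), dt=0.01, steps=500,
    diffusion_derivative=diffusion_derivative)
print(f"final state: {path[-1]}, stationary std ~ {sigma / np.sqrt(2*theta):.3f}")
```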
### 3. Path Sampling
```python
class PathSampler:
    """Path sampling methods for path integrals."""

    def __init__(self, n_samples=1000):
        self.n_samples = n_samples
        # Euler-Maruyama by default; Milstein would also need the
        # diffusion derivative threaded through sample_paths.
        self.integrator = StochasticPathIntegrator(method='euler')

    def sample_paths(self, drift, diffusion, initial_state, dt, steps):
        """Sample multiple independent paths."""
        paths = []
        for _ in range(self.n_samples):
            path = self.integrator.integrate_sde(
                drift, diffusion, initial_state, dt, steps)
            paths.append(path)
        return np.array(paths)

    def importance_sampling(self, action_functional, paths):
        """Resample paths with weights proportional to exp(-action)."""
        # Compute the action for each path
        actions = np.array([action_functional(path) for path in paths])
        # Shift by the minimum action for numerical stability
        weights = np.exp(-(actions - actions.min()))
        weights /= np.sum(weights)
        # Resample paths according to their weights
        indices = np.random.choice(
            len(paths), size=self.n_samples, p=weights)
        return paths[indices], weights
```
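
A short sketch combining both methods (the quadratic toy action is an illustrative choice): sample Ornstein-Uhlenbeck-style paths, reweight them, and report the effective sample size `1 / sum(w^2)` as a quick health check on the weights.

```python
import numpy as np

sampler = PathSampler(n_samples=200)
paths = sampler.sample_paths(
    lambda x: -x, lambda x: 0.5 * np.ones_like(x),
    np.array([0.0]), dt=0.01, steps=100)

# Toy action that penalizes paths straying from the origin
toy_action = lambda path: 0.01 * np.sum(path**2)
resampled, weights = sampler.importance_sampling(toy_action, paths)
print(f"effective sample size: {1.0 / np.sum(weights**2):.1f}")
```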
## Optimization Methods

### 1. Path Optimization
```python
class PathOptimizer:
    """Optimization methods for path integrals."""

    def __init__(self, method='natural_gradient', learning_rate=0.1):
        self.method = method
        self.learning_rate = learning_rate
        self.metric_computer = MetricComputer()

    def optimize_path(self, initial_path, action_functional, n_steps,
                      log_prob_func=None):
        """Optimize a discretized path by (natural) gradient descent."""
        path = initial_path.copy()
        for _ in range(n_steps):
            # Finite-difference gradient of the action
            gradient = self.compute_action_gradient(path, action_functional)
            if self.method == 'natural_gradient' and log_prob_func is not None:
                # Precondition with the Fisher metric (ridge-regularized)
                metric = self.metric_computer.fisher_metric(path, log_prob_func)
                metric += 1e-8 * np.eye(len(metric))
                path -= self.learning_rate * np.linalg.solve(metric, gradient)
            else:
                # Plain gradient descent
                path -= self.learning_rate * gradient
        return path

    def compute_action_gradient(self, path, action_functional, eps=1e-6):
        """Central-difference gradient of the action functional."""
        gradient = np.zeros_like(path)
        for idx in np.ndindex(path.shape):
            path_plus = path.copy()
            path_plus[idx] += eps
            path_minus = path.copy()
            path_minus[idx] -= eps
            gradient[idx] = (action_functional(path_plus) -
                             action_functional(path_minus)) / (2*eps)
        return gradient
```
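
A minimal sketch with plain gradient descent (the sine-shaped target and the weights in the toy action are illustrative): the action has a known minimizer, so the residual should shrink toward it.

```python
import numpy as np

# Toy action: quadratic pull toward a target path plus a smoothness penalty
target = np.sin(np.linspace(0, np.pi, 20))
def toy_action(path):
    return np.sum((path - target)**2) + 0.1 * np.sum(np.diff(path)**2)

optimizer = PathOptimizer(method='gradient', learning_rate=0.1)
path = optimizer.optimize_path(np.zeros(20), toy_action, n_steps=200)
print(f"residual action: {toy_action(path):.4f}")
```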
### 2. Precision Optimization
```python
class PrecisionOptimizer:
    """Optimization methods for precision (inverse covariance) parameters."""

    def __init__(self, learning_rate=0.01):
        self.learning_rate = learning_rate

    def optimize_precision(self, prediction_errors, n_steps):
        """Fit a precision matrix to prediction errors by gradient ascent."""
        precision = np.eye(prediction_errors.shape[1])
        for _ in range(n_steps):
            # Ascend the Gaussian log-likelihood
            gradient = self.compute_precision_gradient(
                prediction_errors, precision)
            precision += self.learning_rate * gradient
            # Project back onto the symmetric positive-definite cone
            precision = 0.5 * (precision + precision.T)
            eigvals, eigvecs = np.linalg.eigh(precision)
            eigvals = np.maximum(eigvals, 1e-6)
            precision = eigvecs @ np.diag(eigvals) @ eigvecs.T
        return precision

    def compute_precision_gradient(self, prediction_errors, precision):
        """Gradient of the average Gaussian log-likelihood w.r.t. precision:
        d/dPi [ (log|Pi| - eps' Pi eps) / 2 ] = (Pi^{-1} - E[eps eps']) / 2
        """
        n = prediction_errors.shape[0]
        empirical_cov = prediction_errors.T @ prediction_errors / n
        return 0.5 * (np.linalg.inv(precision) - empirical_cov)
```
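
A sanity-check sketch on synthetic data (the covariance values, learning rate, and step count are illustrative): errors drawn from a known Gaussian, with the fitted precision compared against the inverse of the sample covariance, which is the fixed point of the update.

```python
import numpy as np

rng = np.random.default_rng(0)
true_cov = np.array([[1.0, 0.3], [0.3, 0.5]])
errors = rng.multivariate_normal(np.zeros(2), true_cov, size=5000)

optimizer = PrecisionOptimizer(learning_rate=0.1)
precision = optimizer.optimize_precision(errors, n_steps=2000)
print("fitted precision:\n", np.round(precision, 2))
print("inverse sample covariance:\n", np.round(np.linalg.inv(np.cov(errors.T)), 2))
```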
## Implementation Utilities

### 1. Numerical Differentiation
```python
class NumericalDifferentiator:
    """Numerical differentiation utilities."""

    def __init__(self, method='central'):
        self.method = method

    def derivative(self, func, x, eps=1e-6):
        """Compute a first-order numerical derivative."""
        if self.method == 'central':
            return (func(x + eps) - func(x - eps)) / (2*eps)
        elif self.method == 'forward':
            return (func(x + eps) - func(x)) / eps
        elif self.method == 'backward':
            return (func(x) - func(x - eps)) / eps
        raise ValueError(f"Unknown method: {self.method}")

    def jacobian(self, func, x, eps=1e-6):
        """Central-difference Jacobian of a vector-valued function."""
        n = len(x)
        m = np.atleast_1d(func(x)).size
        jac = np.zeros((m, n))
        for i in range(n):
            x_plus = x.copy()
            x_plus[i] += eps
            x_minus = x.copy()
            x_minus[i] -= eps
            jac[:, i] = (np.atleast_1d(func(x_plus)) -
                         np.atleast_1d(func(x_minus))) / (2*eps)
        return jac
```
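
Two quick checks of the differentiator against known derivatives:

```python
import numpy as np

diff = NumericalDifferentiator(method='central')

# d/dx sin(x) at x = 0 is 1
print(diff.derivative(np.sin, 0.0))

# Jacobian of f(v) = (v0^2, v0*v1) at (1, 2) is [[2, 0], [2, 1]]
f = lambda v: np.array([v[0]**2, v[0] * v[1]])
print(diff.jacobian(f, np.array([1.0, 2.0])))
```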
### 2. Metric Computation
```python
class MetricComputer:
    """Computation of geometric metrics."""

    def __init__(self):
        self.differentiator = NumericalDifferentiator()

    def fisher_metric(self, path, log_prob_func):
        """Empirical Fisher information metric.

        `log_prob_func` maps a path to a vector of per-point
        log-probabilities; the metric sums the outer products of the
        per-output score vectors.
        """
        jac = self.differentiator.jacobian(log_prob_func, path)
        return jac.T @ jac

    def path_metric(self, path, action_functional, eps=1e-4):
        """Hessian of a scalar action functional w.r.t. the path points,
        computed by central second differences."""
        n = len(path)
        metric = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                pp = path.copy(); pp[i] += eps; pp[j] += eps
                pm = path.copy(); pm[i] += eps; pm[j] -= eps
                mp = path.copy(); mp[i] -= eps; mp[j] += eps
                mm = path.copy(); mm[i] -= eps; mm[j] -= eps
                metric[i, j] = (action_functional(pp) - action_functional(pm) -
                                action_functional(mp) + action_functional(mm)
                                ) / (4 * eps**2)
        return metric
```
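
A small correctness check (the quadratic action is an illustrative choice with a known Hessian):

```python
import numpy as np

computer = MetricComputer()

# The Hessian of S(x) = sum(x^2) is 2*I
quadratic_action = lambda path: np.sum(path**2)
print(np.round(computer.path_metric(np.zeros(3), quadratic_action), 3))
```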
### 3. Action Functionals
```python
class ActionFunctionals:
    """Collection of discretized action functionals."""

    def classical_action(self, path, lagrangian, dt):
        """Compute the classical action S = sum L(q, q_dot) dt."""
        action = 0.0
        for i in range(len(path)-1):
            velocity = (path[i+1] - path[i]) / dt
            action += lagrangian(path[i], velocity) * dt
        return action

    def quantum_action(self, path, potential, mass, dt):
        """Discretized real-time action S = sum (T - V) dt.

        For Euclidean (imaginary-time) path integrals the sign of the
        potential term flips to T + V.
        """
        action = 0.0
        for i in range(len(path)-1):
            # Kinetic term from the finite-difference velocity
            velocity = (path[i+1] - path[i]) / dt
            kinetic = 0.5 * mass * np.sum(velocity**2)
            # Potential term evaluated at the left endpoint
            potential_energy = potential(path[i])
            action += (kinetic - potential_energy) * dt
        return action
```
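
A usage sketch with a harmonic-oscillator Lagrangian (an illustrative choice); over a half period of `sin(t)` the discretized action should land close to the analytic value of zero.

```python
import numpy as np

functionals = ActionFunctionals()

# Harmonic oscillator: L(q, v) = v^2/2 - q^2/2
lagrangian = lambda q, v: 0.5 * np.sum(v**2) - 0.5 * np.sum(q**2)
path = np.sin(np.linspace(0, np.pi, 100))
print(functionals.classical_action(path, lagrangian, dt=np.pi / 99))
```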
## Validation Methods

### 1. Conservation Laws
```python
class ConservationChecker:
    """Check conservation laws along a trajectory."""

    def __init__(self, tolerance=1e-6):
        self.tolerance = tolerance

    def check_energy_conservation(self, path, hamiltonian):
        """Energy along the trajectory should stay (nearly) constant."""
        energies = np.array([hamiltonian(state) for state in path])
        return np.std(energies) < self.tolerance

    def check_momentum_conservation(self, path):
        """Total momentum should stay constant (interleaved (q, p) layout)."""
        momenta = path[:, 1::2]  # Extract momentum components
        total_momentum = np.sum(momenta, axis=1)
        return np.std(total_momentum) < self.tolerance
```
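
A quick sketch reusing the symplectic example above (assuming its `trajectory` variable is in scope); only energy is checked here, since the oscillator's momentum is not conserved under a restoring force.

```python
import numpy as np

checker = ConservationChecker(tolerance=1e-3)
energy = lambda s: 0.5 * s[1]**2 + 0.5 * s[0]**2
print(f"energy conserved: {checker.check_energy_conservation(trajectory, energy)}")
```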
### 2. Error Analysis
```python
class ErrorAnalyzer:
    """Analyze numerical integration errors."""

    def compute_local_error(self, exact_path, numerical_path):
        """Maximum pointwise (local truncation) error."""
        return np.max(np.abs(exact_path - numerical_path))

    def compute_global_error(self, exact_path, numerical_path):
        """Root-mean-square (global accumulation) error."""
        return np.sqrt(np.mean((exact_path - numerical_path)**2))

    def convergence_rate(self, dt_values, errors):
        """Estimate the order p from the slope of the log-log error curve,
        assuming errors ~ C * dt^p."""
        return np.polyfit(np.log(dt_values), np.log(errors), 1)[0]
```
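
A self-check on synthetic errors (illustrative values): errors scaling as `dt^2` should produce an estimated rate of 2.

```python
import numpy as np

analyzer = ErrorAnalyzer()
dt_values = np.array([0.1, 0.05, 0.025, 0.0125])
errors = 3.0 * dt_values**2  # synthetic second-order errors
print(f"estimated convergence rate: {analyzer.convergence_rate(dt_values, errors):.2f}")
```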
## See Also

- [[path_integral_free_energy]]
- [[continuous_time_active_inference]]