Mirror of https://github.com/docxology/cognitive.git (synced 2025-10-29 20:26:04 +02:00)

Commit e555897efa ("Updates")
Parent: 5d0dd1258a
@@ -1,17 +1,13 @@
|
||||
"""
|
||||
Nestmate Agent Implementation
|
||||
|
||||
This module implements the Nestmate agent class, which represents an individual ant
|
||||
in the colony using the Free Energy Principle (FEP) and Active Inference framework.
|
||||
This module implements a simplified version of the Nestmate agent class,
|
||||
representing an individual ant in the colony.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from dataclasses import dataclass
|
||||
|
||||
class TaskType(Enum):
|
||||
"""Possible task types for a Nestmate agent."""
|
||||
@@ -22,314 +18,149 @@ class TaskType(Enum):
|
||||
EXPLORATION = "exploration"
|
||||
|
||||
@dataclass
|
||||
class Observation:
|
||||
"""Container for sensory observations."""
|
||||
pheromone: np.ndarray # Pheromone gradients
|
||||
food: np.ndarray # Food source locations
|
||||
nestmates: np.ndarray # Other agent positions
|
||||
obstacles: np.ndarray # Obstacle positions
|
||||
nest: np.ndarray # Nest location/gradient
|
||||
|
||||
class GenerativeModel(nn.Module):
|
||||
"""Hierarchical generative model for active inference."""
|
||||
|
||||
def __init__(self, config: dict):
|
||||
super().__init__()
|
||||
|
||||
# Model dimensions
|
||||
self.obs_dim = config['dimensions']['observations']
|
||||
self.state_dim = config['dimensions']['states']
|
||||
self.action_dim = config['dimensions']['actions']
|
||||
self.temporal_horizon = config['dimensions']['planning_horizon']
|
||||
|
||||
# Hierarchical layers
|
||||
self.layers = nn.ModuleList([
|
||||
nn.Linear(self.state_dim, self.state_dim)
|
||||
for _ in range(config['active_inference']['model']['hierarchical_levels'])
|
||||
])
|
||||
|
||||
# State transition model (dynamics)
|
||||
self.transition = nn.Sequential(
|
||||
nn.Linear(self.state_dim + self.action_dim, self.state_dim * 2),
|
||||
nn.ReLU(),
|
||||
nn.Linear(self.state_dim * 2, self.state_dim)
|
||||
)
|
||||
|
||||
# Observation model
|
||||
self.observation = nn.Sequential(
|
||||
nn.Linear(self.state_dim, self.obs_dim * 2),
|
||||
nn.ReLU(),
|
||||
nn.Linear(self.obs_dim * 2, self.obs_dim)
|
||||
)
|
||||
|
||||
# Policy network
|
||||
self.policy = nn.Sequential(
|
||||
nn.Linear(self.state_dim, self.action_dim * 2),
|
||||
nn.ReLU(),
|
||||
nn.Linear(self.action_dim * 2, self.action_dim)
|
||||
)
|
||||
|
||||
# Precision parameters
|
||||
self.alpha = nn.Parameter(torch.ones(1)) # Precision of beliefs
|
||||
self.beta = nn.Parameter(torch.ones(1)) # Precision of policies
|
||||
|
||||
def forward(self,
|
||||
state: torch.Tensor,
|
||||
action: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
"""Forward pass through the generative model."""
|
||||
|
||||
# Hierarchical state processing
|
||||
for layer in self.layers:
|
||||
state = F.relu(layer(state))
|
||||
|
||||
# Generate observations
|
||||
predicted_obs = self.observation(state)
|
||||
|
||||
# If action provided, predict next state
|
||||
if action is not None:
|
||||
state_action = torch.cat([state, action], dim=-1)
|
||||
next_state = self.transition(state_action)
|
||||
return predicted_obs, next_state
|
||||
|
||||
return predicted_obs, None
|
||||
|
||||
def infer_state(self,
|
||||
obs: torch.Tensor,
|
||||
prev_state: Optional[torch.Tensor] = None,
|
||||
n_steps: int = 10) -> torch.Tensor:
|
||||
"""Infer hidden state through iterative message passing."""
|
||||
|
||||
if prev_state is None:
|
||||
state = torch.zeros(obs.shape[0], self.state_dim)
|
||||
else:
|
||||
state = prev_state
|
||||
|
||||
state.requires_grad = True
|
||||
optimizer = torch.optim.Adam([state], lr=0.1)
|
||||
|
||||
for _ in range(n_steps):
|
||||
optimizer.zero_grad()
|
||||
|
||||
# Prediction errors
|
||||
pred_obs, _ = self.forward(state)
|
||||
obs_error = F.mse_loss(pred_obs, obs)
|
||||
|
||||
if prev_state is not None:
|
||||
state_error = F.mse_loss(state, prev_state)
|
||||
loss = obs_error + self.alpha * state_error
|
||||
else:
|
||||
loss = obs_error
|
||||
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
return state.detach()
|
||||
|
||||
def select_action(self,
|
||||
state: torch.Tensor,
|
||||
temperature: float = 1.0) -> torch.Tensor:
|
||||
"""Select action using active inference."""
|
||||
|
||||
# Get action distribution
|
||||
action_logits = self.policy(state)
|
||||
action_probs = F.softmax(action_logits / temperature, dim=-1)
|
||||
|
||||
# Sample action
|
||||
action = torch.multinomial(action_probs, 1)
|
||||
|
||||
return action
|
||||
class Position:
|
||||
"""2D position with orientation."""
|
||||
x: float
|
||||
y: float
|
||||
theta: float = 0.0
|
||||
|
||||
class Nestmate:
|
||||
"""
|
||||
Individual ant agent implementing active inference for decision making.
|
||||
"""
|
||||
"""Individual ant agent with basic behaviors."""
|
||||
|
||||
def __init__(self, config: dict):
|
||||
"""Initialize Nestmate agent."""
|
||||
self.config = config
|
||||
|
||||
# Physical state
|
||||
self.position = np.zeros(2)
|
||||
self.position = Position(0.0, 0.0, 0.0)
|
||||
self.velocity = np.zeros(2)
|
||||
self.orientation = 0.0
|
||||
self.energy = config['physical']['energy']['initial']
|
||||
|
||||
# Task state
|
||||
self.current_task = TaskType.EXPLORATION
|
||||
self.carrying = None
|
||||
|
||||
# Sensory state
|
||||
self.observations = Observation(
|
||||
pheromone=np.zeros(len(config['sensors']['pheromone']['types'])),
|
||||
food=np.zeros(2),
|
||||
nestmates=np.zeros(2),
|
||||
obstacles=np.zeros(2),
|
||||
nest=np.zeros(2)
|
||||
)
|
||||
# Sensors
|
||||
self.sensor_range = config['physical']['sensor_range']
|
||||
|
||||
# Active inference components
|
||||
self.generative_model = GenerativeModel(config)
|
||||
self.current_state = None
|
||||
self.previous_action = None
|
||||
# Movement parameters
|
||||
self.max_speed = config['physical']['max_speed']
|
||||
self.turn_rate = config['physical']['turn_rate']
|
||||
|
||||
# Memory
|
||||
self.memory = {
|
||||
'spatial': [],
|
||||
'temporal': [],
|
||||
'social': []
|
||||
def sense(self, world_state: dict) -> dict:
|
||||
"""Process sensory inputs from environment."""
|
||||
# Get nearby entities within sensor range
|
||||
nearby = {
|
||||
'food': [],
|
||||
'nestmates': [],
|
||||
'obstacles': [],
|
||||
'pheromones': {}
|
||||
}
|
||||
|
||||
# Learning parameters
|
||||
self.learning_rate = config['learning']['parameters']['learning_rate']
|
||||
self.exploration_rate = config['learning']['parameters']['exploration_rate']
|
||||
# Process food sources
|
||||
for food in world_state['resources']:
|
||||
dist = self._distance_to(food.position)
|
||||
if dist <= self.sensor_range:
|
||||
nearby['food'].append((food, dist))
|
||||
|
||||
def update(self, observation: Observation) -> np.ndarray:
|
||||
"""
|
||||
Update agent state and select action using active inference.
|
||||
# Process other agents
|
||||
for agent in world_state['agents']:
|
||||
if agent != self:
|
||||
dist = self._distance_to(agent.position)
|
||||
if dist <= self.sensor_range:
|
||||
nearby['nestmates'].append((agent, dist))
|
||||
|
||||
Args:
|
||||
observation: Current sensory observations
|
||||
# Process pheromones
|
||||
for p_type, value in world_state['pheromones'].items():
|
||||
nearby['pheromones'][p_type] = value
|
||||
|
||||
Returns:
|
||||
action: Selected action as numpy array
|
||||
"""
|
||||
# Convert observation to tensor
|
||||
obs_tensor = torch.tensor(self._preprocess_observation(observation))
|
||||
return nearby
|
||||
|
||||
# State inference
|
||||
inferred_state = self.generative_model.infer_state(
|
||||
obs_tensor,
|
||||
prev_state=self.current_state
|
||||
)
|
||||
self.current_state = inferred_state
|
||||
def decide_action(self, sensed: dict) -> tuple:
|
||||
"""Decide next action based on current state and sensory input."""
|
||||
# Default behavior: random walk
|
||||
speed = self.max_speed
|
||||
turn = np.random.uniform(-self.turn_rate, self.turn_rate)
|
||||
|
||||
# Action selection
|
||||
action = self.generative_model.select_action(
|
||||
inferred_state,
|
||||
temperature=self.config['active_inference']['free_energy']['temperature']
|
||||
)
|
||||
|
||||
# Update memory
|
||||
self._update_memory(observation, action)
|
||||
|
||||
# Update internal state
|
||||
self._update_internal_state()
|
||||
|
||||
return action.numpy()
|
||||
|
||||
def _preprocess_observation(self, observation: Observation) -> np.ndarray:
|
||||
"""Preprocess raw observations into model input format."""
|
||||
# Combine all observations into single vector
|
||||
obs_vector = np.concatenate([
|
||||
observation.pheromone,
|
||||
observation.food,
|
||||
observation.nestmates,
|
||||
observation.obstacles,
|
||||
observation.nest
|
||||
])
|
||||
|
||||
# Normalize
|
||||
obs_vector = (obs_vector - obs_vector.mean()) / (obs_vector.std() + 1e-8)
|
||||
|
||||
return obs_vector
|
||||
|
||||
def _update_memory(self, observation: Observation, action: torch.Tensor):
|
||||
"""Update agent's memory systems."""
|
||||
# Spatial memory
|
||||
self.memory['spatial'].append({
|
||||
'position': self.position.copy(),
|
||||
'observation': observation,
|
||||
'timestamp': None # Add actual timestamp in implementation
|
||||
})
|
||||
|
||||
# Temporal memory
|
||||
self.memory['temporal'].append({
|
||||
'state': self.current_state.detach().numpy(),
|
||||
'action': action.numpy(),
|
||||
'reward': self._compute_reward(observation)
|
||||
})
|
||||
|
||||
# Social memory (interactions with other agents)
|
||||
if np.any(observation.nestmates):
|
||||
self.memory['social'].append({
|
||||
'nestmate_positions': observation.nestmates.copy(),
|
||||
'interaction_type': self._classify_interaction(observation)
|
||||
})
|
||||
|
||||
# Maintain memory size limits
|
||||
for memory_type in self.memory:
|
||||
if len(self.memory[memory_type]) > self.config['memory'][memory_type]['capacity']:
|
||||
self.memory[memory_type].pop(0)
|
||||
|
||||
def _update_internal_state(self):
|
||||
"""Update agent's internal state variables."""
|
||||
# Update energy
|
||||
self.energy -= self.config['physical']['energy']['consumption_rate']
|
||||
if self.carrying is not None:
|
||||
self.energy -= self.config['physical']['energy']['consumption_rate'] * 2
|
||||
|
||||
# Update task if needed
|
||||
if self._should_switch_task():
|
||||
self._switch_task()
|
||||
|
||||
# Update learning parameters
|
||||
self.exploration_rate *= self.config['learning']['parameters']['decay_rate']
|
||||
self.exploration_rate = max(
|
||||
self.exploration_rate,
|
||||
self.config['learning']['parameters']['min_exploration']
|
||||
)
|
||||
|
||||
def _compute_reward(self, observation: Observation) -> float:
|
||||
"""Compute reward signal from current observation."""
|
||||
reward = 0.0
|
||||
|
||||
# Task-specific rewards
|
||||
# Task-specific behaviors
|
||||
if self.current_task == TaskType.FORAGING:
|
||||
reward += np.sum(observation.food) * self.config['active_inference']['preferences']['food_weight']
|
||||
|
||||
# Distance to nest reward
|
||||
nest_distance = np.linalg.norm(observation.nest)
|
||||
reward -= nest_distance * self.config['active_inference']['preferences']['home_weight']
|
||||
# If carrying food, head back to nest
|
||||
if self.carrying:
|
||||
turn = self._angle_to_nest()
|
||||
# Otherwise, follow food pheromones or explore
|
||||
elif 'food' in sensed['pheromones']:
|
||||
pheromone_gradient = sensed['pheromones']['food']
|
||||
if np.any(pheromone_gradient > 0):
|
||||
turn = self._follow_gradient(pheromone_gradient)
|
||||
|
||||
elif self.current_task == TaskType.EXPLORATION:
|
||||
# Random walk with longer persistence
|
||||
if np.random.random() < 0.1: # 10% chance to change direction
|
||||
turn = np.random.uniform(-np.pi, np.pi)
|
||||
|
||||
return speed, turn
|
||||
|
||||
# Safety reward (avoiding obstacles)
|
||||
obstacle_penalty = np.sum(1.0 / (1.0 + np.linalg.norm(observation.obstacles, axis=1)))
|
||||
reward -= obstacle_penalty * self.config['active_inference']['preferences']['safety_weight']
|
||||
def update(self, dt: float, world_state: dict):
|
||||
"""Update agent state."""
|
||||
# Sense environment
|
||||
sensed = self.sense(world_state)
|
||||
|
||||
# Social reward
|
||||
if np.any(observation.nestmates):
|
||||
social_reward = self.config['active_inference']['preferences']['social_weight']
|
||||
reward += social_reward
|
||||
# Decide action
|
||||
speed, turn = self.decide_action(sensed)
|
||||
|
||||
# Update position and orientation
|
||||
self.position.theta += turn * dt
|
||||
self.position.theta = self.position.theta % (2 * np.pi)
|
||||
|
||||
dx = speed * np.cos(self.position.theta) * dt
|
||||
dy = speed * np.sin(self.position.theta) * dt
|
||||
|
||||
self.position.x += dx
|
||||
self.position.y += dy
|
||||
|
||||
# Update energy
|
||||
self.energy -= self.config['physical']['energy']['consumption_rate'] * dt
|
||||
if self.carrying:
|
||||
self.energy -= self.config['physical']['energy']['consumption_rate'] * dt
|
||||
|
||||
return reward
|
||||
|
||||
def _should_switch_task(self) -> bool:
|
||||
"""Determine if agent should switch its current task."""
|
||||
# Energy-based switching
|
||||
# Consider task switching
|
||||
self._consider_task_switch(sensed)
|
||||
|
||||
def _distance_to(self, other_pos: Position) -> float:
|
||||
"""Calculate distance to another position."""
|
||||
dx = other_pos.x - self.position.x
|
||||
dy = other_pos.y - self.position.y
|
||||
return np.sqrt(dx*dx + dy*dy)
|
||||
|
||||
def _angle_to_nest(self) -> float:
|
||||
"""Calculate turn angle towards nest."""
|
||||
# Simplified: assume nest is at (0,0)
|
||||
dx = -self.position.x
|
||||
dy = -self.position.y
|
||||
target_angle = np.arctan2(dy, dx)
|
||||
current_angle = self.position.theta
|
||||
|
||||
# Calculate shortest turn
|
||||
diff = target_angle - current_angle
|
||||
while diff > np.pi:
|
||||
diff -= 2*np.pi
|
||||
while diff < -np.pi:
|
||||
diff += 2*np.pi
|
||||
|
||||
return np.clip(diff, -self.turn_rate, self.turn_rate)
|
||||
|
||||
def _follow_gradient(self, gradient: np.ndarray) -> float:
|
||||
"""Calculate turn angle to follow a pheromone gradient."""
|
||||
# Simplified: assume gradient gives us desired direction
|
||||
target_angle = np.arctan2(gradient[1], gradient[0])
|
||||
return self._angle_to_nest() # Reuse angle calculation
|
||||
|
||||
def _consider_task_switch(self, sensed: dict):
|
||||
"""Consider switching current task."""
|
||||
# Simple task switching based on energy and random chance
|
||||
if self.energy < self.config['physical']['energy']['critical_level']:
|
||||
return True
|
||||
|
||||
# Random switching based on flexibility
|
||||
if np.random.random() < self.config['behavior']['task_switching']['flexibility']:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def _switch_task(self):
|
||||
"""Switch to a new task based on current conditions."""
|
||||
# Get valid task options
|
||||
valid_tasks = list(TaskType)
|
||||
if self.current_task in valid_tasks:
|
||||
valid_tasks.remove(self.current_task)
|
||||
|
||||
# Select new task (can be made more sophisticated)
|
||||
self.current_task = np.random.choice(valid_tasks)
|
||||
|
||||
def _classify_interaction(self, observation: Observation) -> str:
|
||||
"""Classify type of interaction with nearby nestmates."""
|
||||
# Simple distance-based classification
|
||||
distances = np.linalg.norm(observation.nestmates, axis=1)
|
||||
if np.any(distances < 1.0):
|
||||
return "direct"
|
||||
elif np.any(distances < 3.0):
|
||||
return "indirect"
|
||||
return "none"
|
||||
self.current_task = TaskType.FORAGING
|
||||
elif np.random.random() < 0.001: # 0.1% chance to switch tasks
|
||||
available_tasks = list(TaskType)
|
||||
available_tasks.remove(self.current_task)
|
||||
self.current_task = np.random.choice(available_tasks)
|
||||
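For reference, the state-inference loop in the removed GenerativeModel above is ordinary gradient descent on the observation prediction error. A compact standalone illustration of the same idea follows; the linear observation model, dimensions, and iteration count are made up for the example.

import torch
import torch.nn.functional as F

# Stand-in observation model and a fake observation (dimensions are arbitrary).
obs_dim, state_dim = 8, 4
obs_model = torch.nn.Linear(state_dim, obs_dim)
obs = torch.randn(1, obs_dim)

# Infer the hidden state by minimizing prediction error, as in infer_state().
state = torch.zeros(1, state_dim, requires_grad=True)
optimizer = torch.optim.Adam([state], lr=0.1)
for _ in range(10):
    optimizer.zero_grad()
    loss = F.mse_loss(obs_model(state), obs)
    loss.backward()
    optimizer.step()
state = state.detach()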
Things/Ant_Colony/ant_colony/__init__.py (new file, 8 lines)
@@ -0,0 +1,8 @@
"""
Ant Colony Simulation Package

A multi-agent simulation of an ant colony using simplified behaviors
and visualizations.
"""

__version__ = "1.0.0"
Things/Ant_Colony/ant_colony/agents/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
"""Agents package for ant colony simulation."""

from .nestmate import Nestmate, Position, TaskType

__all__ = ['Nestmate', 'Position', 'TaskType']
Things/Ant_Colony/ant_colony/agents/nestmate.py (new file, 166 lines)
@@ -0,0 +1,166 @@
"""
Nestmate Agent Implementation

This module implements a simplified version of the Nestmate agent class,
representing an individual ant in the colony.
"""

import numpy as np
from enum import Enum
from dataclasses import dataclass

class TaskType(Enum):
    """Possible task types for a Nestmate agent."""
    FORAGING = "foraging"
    MAINTENANCE = "maintenance"
    NURSING = "nursing"
    DEFENSE = "defense"
    EXPLORATION = "exploration"

@dataclass
class Position:
    """2D position with orientation."""
    x: float
    y: float
    theta: float = 0.0

class Nestmate:
    """Individual ant agent with basic behaviors."""

    def __init__(self, config: dict):
        """Initialize Nestmate agent."""
        self.config = config

        # Physical state
        self.position = Position(0.0, 0.0, 0.0)
        self.velocity = np.zeros(2)
        self.energy = config['physical']['energy']['initial']

        # Task state
        self.current_task = TaskType.EXPLORATION
        self.carrying = None

        # Sensors
        self.sensor_range = config['physical']['sensor_range']

        # Movement parameters
        self.max_speed = config['physical']['max_speed']
        self.turn_rate = config['physical']['turn_rate']

    def sense(self, world_state: dict) -> dict:
        """Process sensory inputs from environment."""
        # Get nearby entities within sensor range
        nearby = {
            'food': [],
            'nestmates': [],
            'obstacles': [],
            'pheromones': {}
        }

        # Process food sources
        for food in world_state['resources']:
            dist = self._distance_to(food.position)
            if dist <= self.sensor_range:
                nearby['food'].append((food, dist))

        # Process other agents
        for agent in world_state['agents']:
            if agent != self:
                dist = self._distance_to(agent.position)
                if dist <= self.sensor_range:
                    nearby['nestmates'].append((agent, dist))

        # Process pheromones
        for p_type, value in world_state['pheromones'].items():
            nearby['pheromones'][p_type] = value

        return nearby

    def decide_action(self, sensed: dict) -> tuple:
        """Decide next action based on current state and sensory input."""
        # Default behavior: random walk
        speed = self.max_speed
        turn = np.random.uniform(-self.turn_rate, self.turn_rate)

        # Task-specific behaviors
        if self.current_task == TaskType.FORAGING:
            # If carrying food, head back to nest
            if self.carrying:
                turn = self._angle_to_nest()
            # Otherwise, follow food pheromones or explore
            elif 'food' in sensed['pheromones']:
                pheromone_gradient = sensed['pheromones']['food']
                if np.any(pheromone_gradient > 0):
                    turn = self._follow_gradient(pheromone_gradient)

        elif self.current_task == TaskType.EXPLORATION:
            # Random walk with longer persistence
            if np.random.random() < 0.1:  # 10% chance to change direction
                turn = np.random.uniform(-np.pi, np.pi)

        return speed, turn

    def update(self, dt: float, world_state: dict):
        """Update agent state."""
        # Sense environment
        sensed = self.sense(world_state)

        # Decide action
        speed, turn = self.decide_action(sensed)

        # Update position and orientation
        self.position.theta += turn * dt
        self.position.theta = self.position.theta % (2 * np.pi)

        dx = speed * np.cos(self.position.theta) * dt
        dy = speed * np.sin(self.position.theta) * dt

        self.position.x += dx
        self.position.y += dy

        # Update energy
        self.energy -= self.config['physical']['energy']['consumption_rate'] * dt
        if self.carrying:
            self.energy -= self.config['physical']['energy']['consumption_rate'] * dt

        # Consider task switching
        self._consider_task_switch(sensed)

    def _distance_to(self, other_pos: Position) -> float:
        """Calculate distance to another position."""
        dx = other_pos.x - self.position.x
        dy = other_pos.y - self.position.y
        return np.sqrt(dx*dx + dy*dy)

    def _angle_to_nest(self) -> float:
        """Calculate turn angle towards nest."""
        # Simplified: assume nest is at (0,0)
        dx = -self.position.x
        dy = -self.position.y
        target_angle = np.arctan2(dy, dx)
        current_angle = self.position.theta

        # Calculate shortest turn
        diff = target_angle - current_angle
        while diff > np.pi:
            diff -= 2*np.pi
        while diff < -np.pi:
            diff += 2*np.pi

        return np.clip(diff, -self.turn_rate, self.turn_rate)

    def _follow_gradient(self, gradient: np.ndarray) -> float:
        """Calculate turn angle to follow a pheromone gradient."""
        # Simplified: assume gradient gives us desired direction
        target_angle = np.arctan2(gradient[1], gradient[0])
        return self._angle_to_nest()  # Reuse angle calculation

    def _consider_task_switch(self, sensed: dict):
        """Consider switching current task."""
        # Simple task switching based on energy and random chance
        if self.energy < self.config['physical']['energy']['critical_level']:
            self.current_task = TaskType.FORAGING
        elif np.random.random() < 0.001:  # 0.1% chance to switch tasks
            available_tasks = list(TaskType)
            available_tasks.remove(self.current_task)
            self.current_task = np.random.choice(available_tasks)
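A minimal sketch of driving the simplified agent on its own. The config dict is hand-built for illustration and contains only the keys that __init__() and update() actually read; the grid size and parameter values are illustrative assumptions.

import numpy as np
from ant_colony.agents import Nestmate, Position

# Illustrative agent config: only the keys read by __init__()/update() are included.
agent_config = {
    'physical': {
        'sensor_range': 10.0,
        'max_speed': 2.0,
        'turn_rate': 0.5,
        'energy': {'initial': 100.0, 'critical_level': 30.0, 'consumption_rate': 0.1},
    }
}

ant = Nestmate(agent_config)
ant.position = Position(50.0, 50.0, theta=0.0)

# An otherwise empty world: no food, no other agents, zeroed pheromone grids.
world_state = {
    'agents': [ant],
    'resources': [],
    'pheromones': {'food': np.zeros((100, 100)), 'home': np.zeros((100, 100))},
}
for _ in range(10):
    ant.update(dt=0.1, world_state=world_state)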
Things/Ant_Colony/ant_colony/config/simulation_config.yaml (new file, 295 lines)
@@ -0,0 +1,295 @@
|
||||
# Simulation Configuration
|
||||
|
||||
# Environment settings
|
||||
environment:
|
||||
size: [100, 100] # World dimensions
|
||||
nest_location: [50, 50] # Center of the world
|
||||
obstacles:
|
||||
count: 10
|
||||
size_range: [2, 5]
|
||||
food_sources:
|
||||
count: 5
|
||||
size_range: [1, 3]
|
||||
value_range: [10, 50]
|
||||
pheromone_decay: 0.995 # Decay rate per timestep
|
||||
|
||||
# Colony settings
|
||||
colony:
|
||||
initial_population: 50
|
||||
max_population: 100
|
||||
reproduction_rate: 0.001 # Chance per timestep
|
||||
|
||||
# Agent settings
|
||||
agent:
|
||||
physical:
|
||||
sensor_range: 10.0
|
||||
max_speed: 2.0
|
||||
turn_rate: 0.5 # radians per timestep
|
||||
energy:
|
||||
initial: 100.0
|
||||
critical_level: 30.0
|
||||
consumption_rate: 0.1
|
||||
|
||||
behavior:
|
||||
task_switching:
|
||||
flexibility: 0.01 # Base rate of task switching
|
||||
|
||||
# Visualization settings
|
||||
visualization:
|
||||
enabled: true
|
||||
update_interval: 1 # Update every N timesteps
|
||||
trail_length: 50 # Number of positions to keep for trails
|
||||
colors:
|
||||
background: [255, 255, 255]
|
||||
obstacles: [100, 100, 100]
|
||||
food: [0, 255, 0]
|
||||
nest: [139, 69, 19]
|
||||
agents:
|
||||
foraging: [255, 0, 0]
|
||||
maintenance: [0, 0, 255]
|
||||
nursing: [255, 192, 203]
|
||||
defense: [128, 0, 0]
|
||||
exploration: [255, 165, 0]
|
||||
pheromones:
|
||||
food: [0, 255, 0, 128]
|
||||
home: [255, 0, 0, 128]
|
||||
|
||||
# Simulation settings
|
||||
simulation:
|
||||
timestep: 0.1
|
||||
max_steps: 10000
|
||||
random_seed: 42
|
||||
|
||||
# Runtime Parameters
|
||||
runtime:
|
||||
# Time Settings
|
||||
time:
|
||||
max_steps: 100000
|
||||
timestep: 0.1
|
||||
real_time_factor: 1.0
|
||||
|
||||
# Execution
|
||||
execution:
|
||||
num_threads: 4
|
||||
gpu_enabled: false
|
||||
seed: 42
|
||||
deterministic: true
|
||||
|
||||
# Initialization
|
||||
initialization:
|
||||
# World Setup
|
||||
world:
|
||||
random_seed: 42
|
||||
generate_terrain: true
|
||||
place_resources: true
|
||||
|
||||
# Colony Setup
|
||||
colony:
|
||||
random_seed: 43
|
||||
place_nest: true
|
||||
distribute_agents: true
|
||||
|
||||
# Physics Engine
|
||||
physics:
|
||||
# Engine Settings
|
||||
engine:
|
||||
type: "2D"
|
||||
collision_detection: true
|
||||
spatial_hash_size: 5.0
|
||||
|
||||
# Parameters
|
||||
parameters:
|
||||
gravity: [0, 0]
|
||||
friction: 0.5
|
||||
restitution: 0.5
|
||||
|
||||
# Constraints
|
||||
constraints:
|
||||
velocity_cap: 10.0
|
||||
force_cap: 100.0
|
||||
acceleration_cap: 20.0
|
||||
|
||||
# Integration
|
||||
integration:
|
||||
# Methods
|
||||
method: "RK4"
|
||||
substeps: 2
|
||||
|
||||
# Error Control
|
||||
error_tolerance: 1e-6
|
||||
max_iterations: 100
|
||||
|
||||
# Stability
|
||||
stability_checks: true
|
||||
energy_conservation: true
|
||||
|
||||
# Active Inference Parameters
|
||||
active_inference:
|
||||
# Global Parameters
|
||||
global:
|
||||
temperature: 1.0
|
||||
learning_rate: 0.1
|
||||
exploration_rate: 0.2
|
||||
|
||||
# Hierarchical Settings
|
||||
hierarchical:
|
||||
levels: 3
|
||||
top_down_weight: 0.7
|
||||
bottom_up_weight: 0.3
|
||||
|
||||
# Precision Settings
|
||||
precision:
|
||||
initial: 1.0
|
||||
learning_enabled: true
|
||||
adaptation_rate: 0.05
|
||||
|
||||
# Multi-Agent System
|
||||
multi_agent:
|
||||
# Coordination
|
||||
coordination:
|
||||
enabled: true
|
||||
method: "decentralized"
|
||||
communication_range: 5.0
|
||||
|
||||
# Synchronization
|
||||
synchronization:
|
||||
enabled: true
|
||||
update_frequency: 10
|
||||
sync_tolerance: 0.1
|
||||
|
||||
# Load Balancing
|
||||
load_balancing:
|
||||
enabled: true
|
||||
method: "dynamic"
|
||||
threshold: 0.8
|
||||
|
||||
# Analysis Settings
|
||||
analysis:
|
||||
# Data Collection
|
||||
data_collection:
|
||||
enabled: true
|
||||
frequency: 100
|
||||
detailed_logging: true
|
||||
|
||||
# Metrics
|
||||
metrics:
|
||||
agent_level:
|
||||
- "position"
|
||||
- "velocity"
|
||||
- "energy"
|
||||
- "beliefs"
|
||||
colony_level:
|
||||
- "population"
|
||||
- "resources"
|
||||
- "efficiency"
|
||||
- "coordination"
|
||||
environment_level:
|
||||
- "resource_distribution"
|
||||
- "pheromone_maps"
|
||||
- "agent_density"
|
||||
|
||||
# Statistics
|
||||
statistics:
|
||||
compute_mean: true
|
||||
compute_variance: true
|
||||
compute_correlations: true
|
||||
temporal_analysis: true
|
||||
|
||||
# Visualization
|
||||
visualization:
|
||||
# Real-time Display
|
||||
realtime:
|
||||
enabled: true
|
||||
update_frequency: 10
|
||||
quality: "medium"
|
||||
|
||||
# Recording
|
||||
recording:
|
||||
enabled: true
|
||||
format: "mp4"
|
||||
framerate: 30
|
||||
resolution: [1920, 1080]
|
||||
|
||||
# Features
|
||||
features:
|
||||
show_agents: true
|
||||
show_pheromones: true
|
||||
show_resources: true
|
||||
show_stats: true
|
||||
|
||||
# UI Elements
|
||||
ui:
|
||||
show_controls: true
|
||||
show_plots: true
|
||||
show_metrics: true
|
||||
interactive: true
|
||||
|
||||
# Data Management
|
||||
data:
|
||||
# Storage
|
||||
storage:
|
||||
format: "hdf5"
|
||||
compression: true
|
||||
backup_frequency: 1000
|
||||
|
||||
# Export
|
||||
export:
|
||||
enabled: true
|
||||
format: ["csv", "json"]
|
||||
frequency: 1000
|
||||
|
||||
# Checkpointing
|
||||
checkpointing:
|
||||
enabled: true
|
||||
frequency: 5000
|
||||
keep_last: 5
|
||||
|
||||
# Analysis Output
|
||||
analysis:
|
||||
save_plots: true
|
||||
save_metrics: true
|
||||
save_trajectories: true
|
||||
output_format: ["png", "pdf"]
|
||||
|
||||
# Performance Monitoring
|
||||
performance:
|
||||
# Monitoring
|
||||
monitoring:
|
||||
enabled: true
|
||||
frequency: 100
|
||||
|
||||
# Profiling
|
||||
profiling:
|
||||
enabled: true
|
||||
detailed: true
|
||||
|
||||
# Optimization
|
||||
optimization:
|
||||
auto_tune: true
|
||||
target_fps: 30
|
||||
|
||||
# Resource Usage
|
||||
resources:
|
||||
max_memory: "4GB"
|
||||
max_cpu_percent: 80
|
||||
gpu_memory_limit: "2GB"
|
||||
|
||||
# Debug Settings
|
||||
debug:
|
||||
# Logging
|
||||
logging:
|
||||
level: "INFO"
|
||||
file: "logs/simulation.log"
|
||||
console_output: true
|
||||
|
||||
# Validation
|
||||
validation:
|
||||
check_constraints: true
|
||||
verify_physics: true
|
||||
test_consistency: true
|
||||
|
||||
# Development
|
||||
development:
|
||||
assertions_enabled: true
|
||||
extra_checks: true
|
||||
profile_code: true
|
||||
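A short sketch of loading this configuration and handing the relevant subtrees to the components that consume them; the path is illustrative.

import yaml
from ant_colony.agents import Nestmate

with open('Things/Ant_Colony/ant_colony/config/simulation_config.yaml') as f:
    config = yaml.safe_load(f)

ant = Nestmate(config['agent'])              # physical/behavior settings
world_size = config['environment']['size']   # [100, 100]
dt = config['simulation']['timestep']        # 0.1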
Things/Ant_Colony/ant_colony/environment/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
"""Environment package for ant colony simulation."""

from .world import World, Position, Resource

__all__ = ['World', 'Position', 'Resource']
Things/Ant_Colony/ant_colony/main.py (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
"""
|
||||
Main entry point for the ant colony simulation.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import yaml
|
||||
import numpy as np
|
||||
from ant_colony.visualization import SimulationRenderer
|
||||
from ant_colony.agents import Nestmate, Position, TaskType
|
||||
from dataclasses import dataclass
|
||||
from typing import List
|
||||
|
||||
@dataclass
|
||||
class FoodSource:
|
||||
"""Represents a food source in the environment."""
|
||||
position: Position
|
||||
size: float
|
||||
value: float
|
||||
remaining: float
|
||||
|
||||
@dataclass
|
||||
class Obstacle:
|
||||
"""Represents an obstacle in the environment."""
|
||||
position: Position
|
||||
size: float
|
||||
|
||||
class Simulation:
|
||||
"""Main simulation class."""
|
||||
|
||||
def __init__(self, config_path: str):
|
||||
"""Initialize simulation with configuration."""
|
||||
with open(config_path, 'r') as f:
|
||||
self.config = yaml.safe_load(f)
|
||||
|
||||
# Set random seed
|
||||
np.random.seed(self.config['simulation']['random_seed'])
|
||||
|
||||
# Initialize environment
|
||||
self.env_size = self.config['environment']['size']
|
||||
self.nest_location = self.config['environment']['nest_location']
|
||||
|
||||
# Initialize agents
|
||||
self.agents = self._create_agents()
|
||||
|
||||
# Initialize resources
|
||||
self.food_sources = self._create_food_sources()
|
||||
self.obstacles = self._create_obstacles()
|
||||
|
||||
# Initialize pheromone grids
|
||||
self.pheromones = {
|
||||
'food': np.zeros(self.env_size),
|
||||
'home': np.zeros(self.env_size)
|
||||
}
|
||||
|
||||
# Setup visualization if enabled
|
||||
if self.config['visualization']['enabled']:
|
||||
self.renderer = SimulationRenderer(self.config)
|
||||
else:
|
||||
self.renderer = None
|
||||
|
||||
def _create_agents(self) -> List[Nestmate]:
|
||||
"""Create initial population of agents."""
|
||||
agents = []
|
||||
for _ in range(self.config['colony']['initial_population']):
|
||||
# Start agents near nest
|
||||
x = self.nest_location[0] + np.random.normal(0, 2)
|
||||
y = self.nest_location[1] + np.random.normal(0, 2)
|
||||
theta = np.random.uniform(0, 2 * np.pi)
|
||||
|
||||
agent = Nestmate(self.config['agent'])
|
||||
agent.position = Position(x, y, theta)
|
||||
agents.append(agent)
|
||||
|
||||
return agents
|
||||
|
||||
def _create_food_sources(self) -> List[FoodSource]:
|
||||
"""Create initial food sources."""
|
||||
sources = []
|
||||
for _ in range(self.config['environment']['food_sources']['count']):
|
||||
x = np.random.uniform(0, self.env_size[0])
|
||||
y = np.random.uniform(0, self.env_size[1])
|
||||
size = np.random.uniform(*self.config['environment']['food_sources']['size_range'])
|
||||
value = np.random.uniform(*self.config['environment']['food_sources']['value_range'])
|
||||
|
||||
source = FoodSource(
|
||||
position=Position(x, y, 0),
|
||||
size=size,
|
||||
value=value,
|
||||
remaining=value
|
||||
)
|
||||
sources.append(source)
|
||||
|
||||
return sources
|
||||
|
||||
def _create_obstacles(self) -> List[Obstacle]:
|
||||
"""Create initial obstacles."""
|
||||
obstacles = []
|
||||
for _ in range(self.config['environment']['obstacles']['count']):
|
||||
x = np.random.uniform(0, self.env_size[0])
|
||||
y = np.random.uniform(0, self.env_size[1])
|
||||
size = np.random.uniform(*self.config['environment']['obstacles']['size_range'])
|
||||
|
||||
obstacle = Obstacle(
|
||||
position=Position(x, y, 0),
|
||||
size=size
|
||||
)
|
||||
obstacles.append(obstacle)
|
||||
|
||||
return obstacles
|
||||
|
||||
def update(self) -> None:
|
||||
"""Update simulation state for one timestep."""
|
||||
dt = self.config['simulation']['timestep']
|
||||
|
||||
# Update agents
|
||||
world_state = {
|
||||
'agents': self.agents,
|
||||
'resources': self.food_sources,
|
||||
'obstacles': self.obstacles,
|
||||
'pheromones': self.pheromones
|
||||
}
|
||||
|
||||
for agent in self.agents:
|
||||
agent.update(dt, world_state)
|
||||
|
||||
# Update pheromones
|
||||
decay = self.config['environment']['pheromone_decay']
|
||||
self.pheromones['food'] *= decay
|
||||
self.pheromones['home'] *= decay
|
||||
|
||||
# Update visualization
|
||||
if self.renderer and self.config['visualization']['enabled']:
|
||||
self.renderer.update(world_state)
|
||||
|
||||
def run(self) -> None:
|
||||
"""Run the simulation."""
|
||||
max_steps = self.config['simulation']['max_steps']
|
||||
|
||||
try:
|
||||
for step in range(max_steps):
|
||||
self.update()
|
||||
|
||||
if step % 100 == 0:
|
||||
print(f"Step {step}/{max_steps}")
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\nSimulation interrupted by user")
|
||||
|
||||
finally:
|
||||
if self.renderer:
|
||||
self.renderer.show()
|
||||
|
||||
def main():
|
||||
"""Main entry point."""
|
||||
parser = argparse.ArgumentParser(description="Ant Colony Simulation")
|
||||
parser.add_argument('--config', type=str, required=True,
|
||||
help='Path to configuration file')
|
||||
args = parser.parse_args()
|
||||
|
||||
simulation = Simulation(args.config)
|
||||
simulation.run()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
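The module is normally launched through main() with a --config argument, but the Simulation class can also be driven step by step. A sketch with visualization switched off so the run stays headless; the config path is illustrative.

import tempfile
import yaml
from ant_colony.main import Simulation

# Load the shipped config, disable the live renderer, and write it back out.
with open('Things/Ant_Colony/ant_colony/config/simulation_config.yaml') as f:
    config = yaml.safe_load(f)
config['visualization']['enabled'] = False

with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as tmp:
    yaml.safe_dump(config, tmp)

sim = Simulation(tmp.name)
for _ in range(100):    # a short run instead of simulation.max_steps
    sim.update()
print(f"{len(sim.agents)} agents, {len(sim.food_sources)} food sources")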
Things/Ant_Colony/ant_colony/visualization/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
"""Visualization package for ant colony simulation."""

from .renderer import SimulationRenderer

__all__ = ['SimulationRenderer']
Things/Ant_Colony/ant_colony/visualization/renderer.py (new file, 123 lines)
@@ -0,0 +1,123 @@
|
||||
"""
|
||||
Visualization module for the ant colony simulation using matplotlib.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib.patches import Circle, Wedge
|
||||
from matplotlib.collections import PatchCollection
|
||||
import matplotlib.animation as animation
|
||||
|
||||
class SimulationRenderer:
|
||||
"""Handles visualization of the ant colony simulation."""
|
||||
|
||||
def __init__(self, config: dict):
|
||||
"""Initialize the renderer with configuration settings."""
|
||||
self.config = config
|
||||
self.viz_config = config['visualization']
|
||||
self.env_size = config['environment']['size']
|
||||
|
||||
# Setup the figure and axis
|
||||
self.fig, self.ax = plt.subplots(figsize=(10, 10))
|
||||
self.ax.set_xlim(0, self.env_size[0])
|
||||
self.ax.set_ylim(0, self.env_size[1])
|
||||
self.ax.set_aspect('equal')
|
||||
|
||||
# Initialize collections for different elements
|
||||
self.agent_patches = []
|
||||
self.food_patches = []
|
||||
self.obstacle_patches = []
|
||||
self.pheromone_plots = {}
|
||||
|
||||
# Setup the nest
|
||||
nest_loc = config['environment']['nest_location']
|
||||
nest = Circle(nest_loc, 5, color=self.viz_config['colors']['nest'])
|
||||
self.ax.add_patch(nest)
|
||||
|
||||
def update(self, world_state: dict) -> None:
|
||||
"""Update the visualization with current world state."""
|
||||
# Clear previous patches
|
||||
for patch in self.agent_patches + self.food_patches + self.obstacle_patches:
|
||||
patch.remove()
|
||||
self.agent_patches.clear()
|
||||
self.food_patches.clear()
|
||||
self.obstacle_patches.clear()
|
||||
|
||||
# Update pheromones
|
||||
for p_type, grid in world_state['pheromones'].items():
|
||||
if p_type not in self.pheromone_plots:
|
||||
color = self.viz_config['colors']['pheromones'][p_type]
|
||||
self.pheromone_plots[p_type] = self.ax.imshow(
|
||||
grid,
|
||||
extent=[0, self.env_size[0], 0, self.env_size[1]],
|
||||
cmap='Greens' if p_type == 'food' else 'Reds',
|
||||
alpha=0.3,
|
||||
vmin=0,
|
||||
vmax=1
|
||||
)
|
||||
else:
|
||||
self.pheromone_plots[p_type].set_array(grid)
|
||||
|
||||
# Draw agents
|
||||
for agent in world_state['agents']:
|
||||
color = self.viz_config['colors']['agents'][agent.current_task.value]
|
||||
agent_patch = self._create_agent_patch(agent, color)
|
||||
self.ax.add_patch(agent_patch)
|
||||
self.agent_patches.append(agent_patch)
|
||||
|
||||
# Draw food sources
|
||||
for food in world_state['resources']:
|
||||
food_patch = Circle(
|
||||
(food.position.x, food.position.y),
|
||||
food.size,
|
||||
color=self.viz_config['colors']['food']
|
||||
)
|
||||
self.ax.add_patch(food_patch)
|
||||
self.food_patches.append(food_patch)
|
||||
|
||||
# Draw obstacles
|
||||
for obstacle in world_state['obstacles']:
|
||||
obstacle_patch = Circle(
|
||||
(obstacle.position.x, obstacle.position.y),
|
||||
obstacle.size,
|
||||
color=self.viz_config['colors']['obstacles']
|
||||
)
|
||||
self.ax.add_patch(obstacle_patch)
|
||||
self.obstacle_patches.append(obstacle_patch)
|
||||
|
||||
# Trigger redraw
|
||||
self.fig.canvas.draw()
|
||||
self.fig.canvas.flush_events()
|
||||
|
||||
def _create_agent_patch(self, agent, color: str) -> Wedge:
|
||||
"""Create a wedge patch to represent an agent."""
|
||||
# Create a wedge shape to show orientation
|
||||
radius = 1.0
|
||||
angle = np.degrees(agent.position.theta)
|
||||
wedge = Wedge(
|
||||
(agent.position.x, agent.position.y),
|
||||
radius,
|
||||
angle - 30, # 60 degree wide wedge
|
||||
angle + 30,
|
||||
color=color
|
||||
)
|
||||
return wedge
|
||||
|
||||
def save_animation(self, frames: list, filename: str) -> None:
|
||||
"""Save the simulation as an animation."""
|
||||
anim = animation.ArtistAnimation(
|
||||
self.fig,
|
||||
frames,
|
||||
interval=50,
|
||||
blit=True,
|
||||
repeat=False
|
||||
)
|
||||
anim.save(filename, writer='pillow')
|
||||
|
||||
def show(self) -> None:
|
||||
"""Display the current visualization."""
|
||||
plt.show()
|
||||
|
||||
def close(self) -> None:
|
||||
"""Close the visualization window."""
|
||||
plt.close(self.fig)
|
||||
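One caveat when wiring this renderer to simulation_config.yaml: matplotlib expects RGB(A) components in the 0-1 range, while the color tables in the config use 0-255 values, so they need to be rescaled before they reach Circle or Wedge. A sketch of the conversion, applied to the parsed config before constructing SimulationRenderer; the path is illustrative.

import yaml

with open('Things/Ant_Colony/ant_colony/config/simulation_config.yaml') as f:
    config = yaml.safe_load(f)

def normalize_colors(colors: dict) -> dict:
    """Rescale 0-255 color lists (and nested tables) into matplotlib's 0-1 range."""
    out = {}
    for key, value in colors.items():
        if isinstance(value, dict):          # nested tables: 'agents', 'pheromones'
            out[key] = {k: [c / 255 for c in v] for k, v in value.items()}
        else:                                # flat entries: 'background', 'food', ...
            out[key] = [c / 255 for c in value]
    return out

config['visualization']['colors'] = normalize_colors(config['visualization']['colors'])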
@@ -1,10 +1,64 @@
|
||||
# Simulation Configuration
|
||||
|
||||
# Simulation Parameters
|
||||
# Environment settings
|
||||
environment:
|
||||
size: [100, 100] # World dimensions
|
||||
nest_location: [50, 50] # Center of the world
|
||||
obstacles:
|
||||
count: 10
|
||||
size_range: [2, 5]
|
||||
food_sources:
|
||||
count: 5
|
||||
size_range: [1, 3]
|
||||
value_range: [10, 50]
|
||||
pheromone_decay: 0.995 # Decay rate per timestep
|
||||
|
||||
# Colony settings
|
||||
colony:
|
||||
initial_population: 50
|
||||
max_population: 100
|
||||
reproduction_rate: 0.001 # Chance per timestep
|
||||
|
||||
# Agent settings
|
||||
agent:
|
||||
physical:
|
||||
sensor_range: 10.0
|
||||
max_speed: 2.0
|
||||
turn_rate: 0.5 # radians per timestep
|
||||
energy:
|
||||
initial: 100.0
|
||||
critical_level: 30.0
|
||||
consumption_rate: 0.1
|
||||
|
||||
behavior:
|
||||
task_switching:
|
||||
flexibility: 0.01 # Base rate of task switching
|
||||
|
||||
# Visualization settings
|
||||
visualization:
|
||||
enabled: true
|
||||
update_interval: 1 # Update every N timesteps
|
||||
trail_length: 50 # Number of positions to keep for trails
|
||||
colors:
|
||||
background: [255, 255, 255]
|
||||
obstacles: [100, 100, 100]
|
||||
food: [0, 255, 0]
|
||||
nest: [139, 69, 19]
|
||||
agents:
|
||||
foraging: [255, 0, 0]
|
||||
maintenance: [0, 0, 255]
|
||||
nursing: [255, 192, 203]
|
||||
defense: [128, 0, 0]
|
||||
exploration: [255, 165, 0]
|
||||
pheromones:
|
||||
food: [0, 255, 0, 128]
|
||||
home: [255, 0, 0, 128]
|
||||
|
||||
# Simulation settings
|
||||
simulation:
|
||||
name: "Ant Colony Simulation"
|
||||
version: "1.0.0"
|
||||
description: "Multi-agent ant colony simulation using active inference"
|
||||
timestep: 0.1
|
||||
max_steps: 10000
|
||||
random_seed: 42
|
||||
|
||||
# Runtime Parameters
|
||||
runtime:
|
||||
|
||||
Things/Ant_Colony/main.py (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
"""
|
||||
Main entry point for the ant colony simulation.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import yaml
|
||||
import numpy as np
|
||||
from visualization.renderer import SimulationRenderer
|
||||
from agents.nestmate import Nestmate, Position
|
||||
from dataclasses import dataclass
|
||||
from typing import List
|
||||
|
||||
@dataclass
|
||||
class FoodSource:
|
||||
"""Represents a food source in the environment."""
|
||||
position: Position
|
||||
size: float
|
||||
value: float
|
||||
remaining: float
|
||||
|
||||
@dataclass
|
||||
class Obstacle:
|
||||
"""Represents an obstacle in the environment."""
|
||||
position: Position
|
||||
size: float
|
||||
|
||||
class Simulation:
|
||||
"""Main simulation class."""
|
||||
|
||||
def __init__(self, config_path: str):
|
||||
"""Initialize simulation with configuration."""
|
||||
with open(config_path, 'r') as f:
|
||||
self.config = yaml.safe_load(f)
|
||||
|
||||
# Set random seed
|
||||
np.random.seed(self.config['simulation']['random_seed'])
|
||||
|
||||
# Initialize environment
|
||||
self.env_size = self.config['environment']['size']
|
||||
self.nest_location = self.config['environment']['nest_location']
|
||||
|
||||
# Initialize agents
|
||||
self.agents = self._create_agents()
|
||||
|
||||
# Initialize resources
|
||||
self.food_sources = self._create_food_sources()
|
||||
self.obstacles = self._create_obstacles()
|
||||
|
||||
# Initialize pheromone grids
|
||||
self.pheromones = {
|
||||
'food': np.zeros(self.env_size),
|
||||
'home': np.zeros(self.env_size)
|
||||
}
|
||||
|
||||
# Setup visualization if enabled
|
||||
if self.config['visualization']['enabled']:
|
||||
self.renderer = SimulationRenderer(self.config)
|
||||
else:
|
||||
self.renderer = None
|
||||
|
||||
def _create_agents(self) -> List[Nestmate]:
|
||||
"""Create initial population of agents."""
|
||||
agents = []
|
||||
for _ in range(self.config['colony']['initial_population']):
|
||||
# Start agents near nest
|
||||
x = self.nest_location[0] + np.random.normal(0, 2)
|
||||
y = self.nest_location[1] + np.random.normal(0, 2)
|
||||
theta = np.random.uniform(0, 2 * np.pi)
|
||||
|
||||
agent = Nestmate(self.config['agent'])
|
||||
agent.position = Position(x, y, theta)
|
||||
agents.append(agent)
|
||||
|
||||
return agents
|
||||
|
||||
def _create_food_sources(self) -> List[FoodSource]:
|
||||
"""Create initial food sources."""
|
||||
sources = []
|
||||
for _ in range(self.config['environment']['food_sources']['count']):
|
||||
x = np.random.uniform(0, self.env_size[0])
|
||||
y = np.random.uniform(0, self.env_size[1])
|
||||
size = np.random.uniform(*self.config['environment']['food_sources']['size_range'])
|
||||
value = np.random.uniform(*self.config['environment']['food_sources']['value_range'])
|
||||
|
||||
source = FoodSource(
|
||||
position=Position(x, y, 0),
|
||||
size=size,
|
||||
value=value,
|
||||
remaining=value
|
||||
)
|
||||
sources.append(source)
|
||||
|
||||
return sources
|
||||
|
||||
def _create_obstacles(self) -> List[Obstacle]:
|
||||
"""Create initial obstacles."""
|
||||
obstacles = []
|
||||
for _ in range(self.config['environment']['obstacles']['count']):
|
||||
x = np.random.uniform(0, self.env_size[0])
|
||||
y = np.random.uniform(0, self.env_size[1])
|
||||
size = np.random.uniform(*self.config['environment']['obstacles']['size_range'])
|
||||
|
||||
obstacle = Obstacle(
|
||||
position=Position(x, y, 0),
|
||||
size=size
|
||||
)
|
||||
obstacles.append(obstacle)
|
||||
|
||||
return obstacles
|
||||
|
||||
def update(self) -> None:
|
||||
"""Update simulation state for one timestep."""
|
||||
dt = self.config['simulation']['timestep']
|
||||
|
||||
# Update agents
|
||||
world_state = {
|
||||
'agents': self.agents,
|
||||
'resources': self.food_sources,
|
||||
'obstacles': self.obstacles,
|
||||
'pheromones': self.pheromones
|
||||
}
|
||||
|
||||
for agent in self.agents:
|
||||
agent.update(dt, world_state)
|
||||
|
||||
# Update pheromones
|
||||
decay = self.config['environment']['pheromone_decay']
|
||||
self.pheromones['food'] *= decay
|
||||
self.pheromones['home'] *= decay
|
||||
|
||||
# Update visualization
|
||||
if self.renderer and self.config['visualization']['enabled']:
|
||||
self.renderer.update(world_state)
|
||||
|
||||
def run(self) -> None:
|
||||
"""Run the simulation."""
|
||||
max_steps = self.config['simulation']['max_steps']
|
||||
|
||||
try:
|
||||
for step in range(max_steps):
|
||||
self.update()
|
||||
|
||||
if step % 100 == 0:
|
||||
print(f"Step {step}/{max_steps}")
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\nSimulation interrupted by user")
|
||||
|
||||
finally:
|
||||
if self.renderer:
|
||||
self.renderer.show()
|
||||
|
||||
def main():
|
||||
"""Main entry point."""
|
||||
parser = argparse.ArgumentParser(description="Ant Colony Simulation")
|
||||
parser.add_argument('--config', type=str, required=True,
|
||||
help='Path to configuration file')
|
||||
args = parser.parse_args()
|
||||
|
||||
simulation = Simulation(args.config)
|
||||
simulation.run()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
Things/Ant_Colony/setup.py (new file, 25 lines)
@@ -0,0 +1,25 @@
from setuptools import setup, find_packages

setup(
    name="ant_colony",
    version="1.0.0",
    description="Multi-agent ant colony simulation",
    author="Your Name",
    packages=find_packages(),
    package_data={
        'ant_colony': ['config/*.yaml']
    },
    install_requires=[
        'numpy',
        'matplotlib',
        'networkx',
        'pyyaml',
        'noise'  # For terrain generation
    ],
    python_requires='>=3.7',
    entry_points={
        'console_scripts': [
            'ant-colony=ant_colony.main:main',
        ],
    },
)
@@ -1,180 +1,151 @@
|
||||
"""
|
||||
Simulation Runner
|
||||
Simulation Runner for Ant Colony Simulation
|
||||
|
||||
This module implements the simulation runner that manages the ant colony simulation,
|
||||
including visualization, data collection, and analysis.
|
||||
This module provides the main simulation runner that coordinates the colony,
|
||||
environment, and visualization components.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib.animation import FuncAnimation
|
||||
import seaborn as sns
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
import yaml
|
||||
import h5py
|
||||
import time
|
||||
import logging
|
||||
import yaml
|
||||
import h5py
|
||||
from pathlib import Path
|
||||
import pandas as pd
|
||||
from typing import Dict, List, Tuple, Optional, Any
|
||||
|
||||
from environment.world import World
|
||||
from colony import Colony
|
||||
from .visualization import ColonyVisualizer
|
||||
from .colony import Colony
|
||||
from .environment import World
|
||||
from .utils.data_collection import DataCollector
|
||||
from agents.nestmate import TaskType
|
||||
|
||||
class Simulation:
|
||||
"""
|
||||
Manages the ant colony simulation, including visualization and data collection.
|
||||
"""
|
||||
"""Main simulation runner class."""
|
||||
|
||||
def __init__(self, config_path: str):
|
||||
"""Initialize simulation."""
|
||||
# Load configuration
|
||||
"""Initialize simulation with configuration."""
|
||||
self.config = self._load_config(config_path)
|
||||
|
||||
# Set up logging
|
||||
self._setup_logging()
|
||||
|
||||
# Initialize components
|
||||
self.environment = World(self.config)
|
||||
self.colony = Colony(self.config, self.environment)
|
||||
|
||||
# Visualization setup
|
||||
if self.config['visualization']['realtime']['enabled']:
|
||||
self._setup_visualization()
|
||||
|
||||
# Data collection setup
|
||||
self.data = {
|
||||
'time': [],
|
||||
'population': [],
|
||||
'resources': [],
|
||||
'task_distribution': [],
|
||||
'efficiency_metrics': [],
|
||||
'coordination_metrics': []
|
||||
}
|
||||
|
||||
# Performance tracking
|
||||
self.performance_metrics = {
|
||||
'step_times': [],
|
||||
'memory_usage': [],
|
||||
'fps': []
|
||||
}
|
||||
self.environment = World(self.config['environment'])
|
||||
self.colony = Colony(self.config['colony'], self.environment)
|
||||
self.visualizer = ColonyVisualizer(self.config['visualization'])
|
||||
self.data_collector = DataCollector(self.config['data_collection'])
|
||||
|
||||
# Simulation state
|
||||
self.current_step = 0
|
||||
self.running = False
|
||||
self.max_steps = self.config['simulation']['max_steps']
|
||||
self.timestep = self.config['simulation']['timestep']
|
||||
self.paused = False
|
||||
self.data = {
|
||||
'time': [],
|
||||
'resources': [],
|
||||
'task_distribution': [],
|
||||
'efficiency_metrics': []
|
||||
}
|
||||
|
||||
def _load_config(self, config_path: str) -> dict:
|
||||
def _load_config(self, config_path: str) -> Dict[str, Any]:
|
||||
"""Load configuration from YAML file."""
|
||||
with open(config_path, 'r') as f:
|
||||
config = yaml.safe_load(f)
|
||||
return config
|
||||
|
||||
def _setup_logging(self):
|
||||
"""Set up logging configuration."""
|
||||
"""Setup logging configuration."""
|
||||
log_config = self.config['debug']['logging']
|
||||
logging.basicConfig(
|
||||
level=self.config['debug']['logging']['level'],
|
||||
format=self.config['debug']['logging']['format'],
|
||||
filename=self.config['debug']['logging']['file']
|
||||
level=getattr(logging, log_config['level']),
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
||||
filename=log_config['file']
|
||||
)
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
def _setup_visualization(self):
|
||||
"""Set up visualization components."""
|
||||
plt.style.use('dark_background')
|
||||
|
||||
# Create figure and subplots
|
||||
self.fig = plt.figure(figsize=self.config['visualization']['plots']['figure_size'])
|
||||
|
||||
# Main simulation view
|
||||
self.ax_main = self.fig.add_subplot(221)
|
||||
self.ax_main.set_title('Colony Simulation')
|
||||
|
||||
# Resource levels
|
||||
self.ax_resources = self.fig.add_subplot(222)
|
||||
self.ax_resources.set_title('Resource Levels')
|
||||
|
||||
# Task distribution
|
||||
self.ax_tasks = self.fig.add_subplot(223)
|
||||
self.ax_tasks.set_title('Task Distribution')
|
||||
|
||||
# Efficiency metrics
|
||||
self.ax_metrics = self.fig.add_subplot(224)
|
||||
self.ax_metrics.set_title('Performance Metrics')
|
||||
|
||||
plt.tight_layout()
|
||||
|
||||
def run(self, num_steps: Optional[int] = None):
|
||||
"""Run simulation for specified number of steps."""
|
||||
self.running = True
|
||||
start_time = time.time()
|
||||
|
||||
max_steps = num_steps if num_steps is not None else self.config['runtime']['time']['max_steps']
|
||||
|
||||
try:
|
||||
while self.running and self.current_step < max_steps:
|
||||
if not self.paused:
|
||||
self.step()
|
||||
|
||||
# Update visualization
|
||||
if self.config['visualization']['realtime']['enabled'] and \
|
||||
self.current_step % self.config['visualization']['realtime']['update_frequency'] == 0:
|
||||
self._update_visualization()
|
||||
|
||||
# Save data
|
||||
if self.current_step % self.config['data']['export']['frequency'] == 0:
|
||||
self._save_data()
|
||||
|
||||
# Performance monitoring
|
||||
if self.config['performance']['monitoring']['enabled'] and \
|
||||
self.current_step % self.config['performance']['monitoring']['frequency'] == 0:
|
||||
self._monitor_performance()
|
||||
|
||||
except KeyboardInterrupt:
|
||||
self.logger.info("Simulation interrupted by user")
|
||||
finally:
|
||||
self._cleanup()
|
||||
|
||||
end_time = time.time()
|
||||
self.logger.info(f"Simulation completed in {end_time - start_time:.2f} seconds")
|
||||
|
||||
def step(self):
|
||||
"""Execute one simulation step."""
|
||||
step_start = time.time()
|
||||
|
||||
# Update environment
|
||||
self.environment.step(self.config['runtime']['time']['timestep'])
|
||||
self.environment.update(self.timestep)
|
||||
|
||||
# Update colony
|
||||
self.colony.step(self.config['runtime']['time']['timestep'])
|
||||
self.colony.update(self.timestep)
|
||||
|
||||
# Collect data
|
||||
self._collect_data()
|
||||
|
||||
# Update step counter
|
||||
# Increment step counter
|
||||
self.current_step += 1
|
||||
|
||||
# Log step time
|
||||
step_time = time.time() - step_start
|
||||
self.performance_metrics['step_times'].append(step_time)
|
||||
|
||||
def _collect_data(self):
|
||||
"""Collect simulation data."""
|
||||
# Basic metrics
|
||||
self.data['time'].append(self.current_step * self.config['runtime']['time']['timestep'])
|
||||
self.data['population'].append(self.colony.stats.population)
|
||||
"""Collect simulation data for analysis and visualization."""
|
||||
self.data['time'].append(self.current_step * self.timestep)
|
||||
|
||||
# Resources
|
||||
self.data['resources'].append(self.colony.stats.resource_levels.copy())
|
||||
# Collect resource data
|
||||
resource_data = {
|
||||
rtype: sum(r.amount for r in resources)
|
||||
for rtype, resources in self.environment.resources.items()
|
||||
}
|
||||
self.data['resources'].append(resource_data)
|
||||
|
||||
# Task distribution
|
||||
self.data['task_distribution'].append(self.colony.stats.task_distribution.copy())
|
||||
# Collect task distribution
|
||||
task_dist = self.colony.get_task_distribution()
|
||||
self.data['task_distribution'].append(task_dist)
|
||||
|
||||
# Efficiency metrics
|
||||
self.data['efficiency_metrics'].append(self.colony.stats.efficiency_metrics.copy())
|
||||
# Collect efficiency metrics
|
||||
metrics = self.colony.compute_efficiency_metrics()
|
||||
self.data['efficiency_metrics'].append(metrics)
|
||||
|
||||
# Coordination metrics
|
||||
self.data['coordination_metrics'].append(self.colony.stats.coordination_metrics.copy())
|
||||
    def run(self, headless: bool = False):
        """Run the simulation."""
        self.logger.info("Starting simulation...")

        if not headless:
            # Create and show animation
            animation = self.visualizer.create_animation(self)
            plt.show()
        else:
            # Run without visualization
            try:
                while self.current_step < self.max_steps and not self.paused:
                    self.step()

                    # Save data periodically
                    if self.current_step % self.config['data_collection']['frequency'] == 0:
                        self.data_collector.save_data(self.data)

            except KeyboardInterrupt:
                self.logger.info("Simulation interrupted by user")
            finally:
                # Save final data
                self.data_collector.save_data(self.data)

        self.logger.info(f"Simulation completed after {self.current_step} steps")

    def pause(self):
        """Pause the simulation."""
        self.paused = True
        self.logger.info("Simulation paused")

    def resume(self):
        """Resume the simulation."""
        self.paused = False
        self.logger.info("Simulation resumed")

    def reset(self):
        """Reset the simulation to initial state."""
        self.logger.info("Resetting simulation...")
        self.current_step = 0
        self.environment.reset()
        self.colony.reset()
        self.data = {
            'time': [],
            'resources': [],
            'task_distribution': [],
            'efficiency_metrics': []
        }

    def _update_visualization(self):
        """Update visualization plots."""

@@ -299,10 +270,10 @@ class Simulation:
        sim_group.create_dataset('step', data=self.current_step)

        # Save colony data
        colony_group.create_dataset('population', data=np.array(self.colony.stats.population))

        # Create datasets for dictionary data
        for key in ['resources', 'task_distribution', 'efficiency_metrics']:
            if len(self.data[key]) > 0:
                group = colony_group.create_group(key)
                for metric_key in self.data[key][0].keys():

@@ -317,7 +288,7 @@ class Simulation:
        # Save basic metrics
        pd.DataFrame({
            'time': self.data['time'],
            'population': self.colony.stats.population
        }).to_csv(output_dir / 'basic_metrics.csv', index=False)

        # Save resource data

@@ -378,17 +349,16 @@ class Simulation:
        self.logger.info(f"Final population: {self.colony.stats.population}")
        self.logger.info(f"Final resource levels: {self.colony.stats.resource_levels}")

    def stop(self):
        """Stop the simulation."""
        self.running = False
        self.logger.info("Simulation stopped")


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Run Ant Colony Simulation')
    parser.add_argument('--config', type=str, required=True,
                        help='Path to configuration file')
    parser.add_argument('--headless', action='store_true',
                        help='Run without visualization')
    args = parser.parse_args()

    # Create and run simulation
    sim = Simulation(args.config)
    sim.run(headless=args.headless)
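A quick way to exercise this entry point, as a sketch only: the module and config file names below are assumptions (the diff does not show the file's path or a sample config), and the config must contain the keys referenced above (`runtime`, `data_collection`, and so on).

    # From the shell, headless:
    #   python simulation.py --config config.yaml --headless

    # Programmatically (module name assumed from this diff):
    from simulation import Simulation

    sim = Simulation('config.yaml')   # path to a config with the keys used above
    sim.run(headless=True)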
374  Things/Ant_Colony/visualization.py  Regular file (new)
@@ -0,0 +1,374 @@
"""
Enhanced Visualization Module for Ant Colony Simulation

This module provides advanced visualization capabilities for the ant colony simulation,
including animated pheromone trails, agent movements, and colony statistics.
"""

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import seaborn as sns
from typing import Dict, List, Optional
from matplotlib.patches import Circle, Wedge, Path, PathPatch
from matplotlib.collections import PatchCollection
import colorsys
import networkx as nx

class ColonyVisualizer:
    """Advanced visualization for the ant colony simulation."""

    def __init__(self, config: dict):
        """Initialize visualizer."""
        self.config = config
        self.fig = None
        self.axes = {}
        self.artists = {}
        self.animation = None

        # Color schemes
        self.color_schemes = {
            'terrain': plt.cm.terrain,
            'pheromone': {
                'food': plt.cm.Greens,
                'home': plt.cm.Reds,
                'alarm': plt.cm.Oranges,
                'trail': plt.cm.Blues
            },
            'agents': {
                'foraging': '#2ecc71',
                'maintenance': '#3498db',
                'nursing': '#9b59b6',
                'defense': '#e74c3c',
                'exploration': '#f1c40f'
            }
        }

        # Setup visualization
        self._setup_visualization()

    def _setup_visualization(self):
        """Set up the visualization layout."""
        plt.style.use('dark_background')

        # Create figure with custom layout
        self.fig = plt.figure(figsize=(16, 9))
        gs = self.fig.add_gridspec(3, 3)

        # Main simulation view (larger)
        self.axes['main'] = self.fig.add_subplot(gs[0:2, 0:2])
        self.axes['main'].set_title('Colony Simulation')

        # Pheromone levels
        self.axes['pheromone'] = self.fig.add_subplot(gs[0, 2])
        self.axes['pheromone'].set_title('Pheromone Levels')

        # Resource levels
        self.axes['resources'] = self.fig.add_subplot(gs[1, 2])
        self.axes['resources'].set_title('Resource Levels')

        # Task distribution
        self.axes['tasks'] = self.fig.add_subplot(gs[2, 0])
        self.axes['tasks'].set_title('Task Distribution')

        # Efficiency metrics
        self.axes['metrics'] = self.fig.add_subplot(gs[2, 1])
        self.axes['metrics'].set_title('Colony Metrics')

        # Network view
        self.axes['network'] = self.fig.add_subplot(gs[2, 2])
        self.axes['network'].set_title('Social Network')

        plt.tight_layout()

    def create_animation(self, simulation, interval: int = 50) -> FuncAnimation:
        """Create animation of the colony simulation."""
        def update(frame):
            # Update simulation state
            if not simulation.paused:
                simulation.step()

            # Clear all axes
            for ax in self.axes.values():
                ax.clear()

            # Update all plots
            artists = []
            artists.extend(self._plot_main_view(simulation))
            artists.extend(self._plot_pheromone_levels(simulation))
            artists.extend(self._plot_resource_levels(simulation))
            artists.extend(self._plot_task_distribution(simulation))
            artists.extend(self._plot_efficiency_metrics(simulation))
            artists.extend(self._plot_social_network(simulation))

            return artists

        self.animation = FuncAnimation(
            self.fig,
            update,
            frames=None,
            interval=interval,
            blit=True
        )

        return self.animation

    def _plot_main_view(self, simulation) -> List:
        """Plot main simulation view with enhanced graphics."""
        ax = self.axes['main']
        artists = []

        # Plot terrain with enhanced colormap
        terrain = simulation.environment.terrain.height_map
        terrain_img = ax.imshow(terrain, cmap=self.color_schemes['terrain'])
        artists.append(terrain_img)

        # Plot pheromone trails with alpha blending
        for ptype, pdata in simulation.environment.pheromones.layers.items():
            pheromone_grid = pdata['grid']
            if np.any(pheromone_grid > 0):
                pheromone_img = ax.imshow(
                    pheromone_grid,
                    cmap=self.color_schemes['pheromone'][ptype],
                    alpha=0.5
                )
                artists.append(pheromone_img)

        # Plot agents with directional markers
        for agent in simulation.colony.agents:
            agent_color = self.color_schemes['agents'][agent.current_task.value]

            # Create ant shape
            ant = self._create_ant_shape(agent.position, agent.orientation,
                                         size=1.0, color=agent_color)
            ax.add_patch(ant)
            artists.append(ant)

            # Add carrying indicator if needed
            if agent.carrying is not None:
                carry_indicator = Circle(
                    (agent.position[0], agent.position[1]),
                    radius=0.3,
                    color='yellow',
                    alpha=0.7
                )
                ax.add_patch(carry_indicator)
                artists.append(carry_indicator)

        # Plot resources
        for resource in simulation.environment.resources:
            if resource.type == 'food':
                marker = '*'
                color = 'yellow'
            else:
                marker = 's'
                color = 'cyan'

            resource_dot = ax.scatter(
                resource.position.x,
                resource.position.y,
                c=color,
                marker=marker,
                s=50
            )
            artists.append(resource_dot)

        # Plot nest with concentric circles
        nest_x = simulation.colony.nest_position.x
        nest_y = simulation.colony.nest_position.y

        for radius in [1, 2, 3]:
            nest_circle = Circle(
                (nest_x, nest_y),
                radius,
                color='white',
                fill=False,
                alpha=0.5
            )
            ax.add_patch(nest_circle)
            artists.append(nest_circle)

        # Add nest center
        nest_center = ax.scatter(
            [nest_x], [nest_y],
            c='white',
            marker='s',
            s=100
        )
        artists.append(nest_center)

        # Set limits and title
        ax.set_xlim(0, simulation.environment.size[0])
        ax.set_ylim(0, simulation.environment.size[1])
        ax.set_title(f'Colony Simulation (Step {simulation.current_step})')

        return artists

    def _create_ant_shape(self, position, orientation, size=1.0, color='white'):
        """Create an ant-like shape for visualization."""
        # Create ant body segments
        body_verts = [
            (-0.5, -0.2),  # Abdomen
            (0.0, -0.1),   # Thorax
            (0.5, 0.0),    # Head
            (0.0, 0.1),
            (-0.5, 0.2)
        ]

        # Scale and rotate vertices
        cos_theta = np.cos(orientation)
        sin_theta = np.sin(orientation)

        transformed_verts = []
        for x, y in body_verts:
            x_rot = x * cos_theta - y * sin_theta
            y_rot = x * sin_theta + y * cos_theta
            transformed_verts.append(
                (position[0] + x_rot * size,
                 position[1] + y_rot * size)
            )

        # Create path
        codes = [Path.MOVETO] + [Path.LINETO] * (len(transformed_verts) - 1)
        path = Path(transformed_verts, codes)

        return PathPatch(path, facecolor=color, edgecolor='none', alpha=0.7)
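    # Illustrative note on the transform above: each body vertex (x, y) is mapped by a
    # standard 2-D rotation followed by a translation,
    #
    #     x' = position[0] + size * (x * cos(orientation) - y * sin(orientation))
    #     y' = position[1] + size * (x * sin(orientation) + y * cos(orientation))
    #
    # so an ant with orientation = 0 points along the +x axis, and increasing the
    # angle (in radians) rotates it counter-clockwise in data coordinates.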

    def _plot_pheromone_levels(self, simulation) -> List:
        """Plot pheromone concentration levels."""
        ax = self.axes['pheromone']
        artists = []

        # Get pheromone data
        pheromone_levels = []
        labels = []
        colors = []

        for ptype, pdata in simulation.environment.pheromones.layers.items():
            pheromone_levels.append(np.mean(pdata['grid']))
            labels.append(ptype)
            colors.append(self.color_schemes['agents'].get(ptype, '#95a5a6'))

        # Create bar plot
        if pheromone_levels:
            bars = ax.bar(range(len(pheromone_levels)), pheromone_levels,
                          color=colors)
            artists.extend(bars)

            ax.set_xticks(range(len(labels)))
            ax.set_xticklabels(labels, rotation=45)
            ax.set_ylabel('Average Concentration')

        return artists

    def _plot_resource_levels(self, simulation) -> List:
        """Plot resource levels over time."""
        ax = self.axes['resources']
        artists = []

        if len(simulation.data['time']) > 0:
            for resource_type in simulation.colony.resources.keys():
                values = [d[resource_type] for d in simulation.data['resources']]
                line = ax.plot(simulation.data['time'][-100:], values[-100:],
                               label=resource_type)
                artists.extend(line)

            ax.legend()
            ax.set_xlabel('Time')
            ax.set_ylabel('Amount')

        return artists

    def _plot_task_distribution(self, simulation) -> List:
        """Plot task distribution with enhanced graphics."""
        ax = self.axes['tasks']
        artists = []

        if len(simulation.data['task_distribution']) > 0:
            latest_dist = simulation.data['task_distribution'][-1]
            tasks = list(latest_dist.keys())
            counts = [latest_dist[task] for task in tasks]
            colors = [self.color_schemes['agents'][task.value] for task in tasks]

            # Create stacked bars for current and needed agents
            bars = ax.bar(range(len(tasks)), counts, color=colors)
            artists.extend(bars)

            # Add target lines for needed agents
            for i, task in enumerate(tasks):
                need = simulation.colony.task_needs[task] * len(simulation.colony.agents)
                line = ax.hlines(need, i - 0.4, i + 0.4, colors='white', linestyles='--')
                artists.append(line)

            ax.set_xticks(range(len(tasks)))
            ax.set_xticklabels([task.value for task in tasks], rotation=45)

        return artists

    def _plot_efficiency_metrics(self, simulation) -> List:
        """Plot efficiency metrics with enhanced graphics."""
        ax = self.axes['metrics']
        artists = []

        if len(simulation.data['efficiency_metrics']) > 0:
            metrics = simulation.data['efficiency_metrics'][-1]
            values = list(metrics.values())
            labels = list(metrics.keys())

            # Create radar chart
            angles = np.linspace(0, 2 * np.pi, len(labels), endpoint=False)
            values = np.concatenate((values, [values[0]]))  # Close the polygon
            angles = np.concatenate((angles, [angles[0]]))  # Close the polygon

            # Plot radar
            line = ax.plot(angles, values)
            artists.extend(line)

            # Fill radar
            fill = ax.fill(angles, values, alpha=0.25)
            artists.extend(fill)

            # Add labels
            for angle, label in zip(angles[:-1], labels):
                ha = 'right' if np.cos(angle) < 0 else 'left'
                va = 'top' if np.sin(angle) < 0 else 'bottom'

                ax.text(angle, 1.2, label,
                        ha=ha, va=va,
                        rotation=np.degrees(angle))

            ax.set_ylim(0, 1)
            ax.set_xticks([])

        return artists

    def _plot_social_network(self, simulation) -> List:
        """Plot social network with enhanced graphics."""
        ax = self.axes['network']
        artists = []

        # Get network data
        G = simulation.colony.interaction_network

        if len(G.nodes) > 0:
            # Calculate node positions using spring layout
            pos = nx.spring_layout(G)

            # Draw edges
            for edge in G.edges():
                x = [pos[edge[0]][0], pos[edge[1]][0]]
                y = [pos[edge[0]][1], pos[edge[1]][1]]
                line = ax.plot(x, y, 'w-', alpha=0.2)
                artists.extend(line)

            # Draw nodes
            for node in G.nodes():
                color = self.color_schemes['agents'][node.current_task.value]
                dot = ax.scatter(pos[node][0], pos[node][1],
                                 c=color, s=50)
                artists.append(dot)

        ax.set_xticks([])
        ax.set_yticks([])

        return artists
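A minimal sketch of how this class is driven, mirroring the non-headless branch of `Simulation.run()` above; `sim` is assumed to be a fully initialized `Simulation`, and the empty config dict is a placeholder since `__init__` only stores it.

    import matplotlib.pyplot as plt

    viz = ColonyVisualizer(config={})                # config is stored but not otherwise read here
    anim = viz.create_animation(sim, interval=50)    # the update callback calls sim.step() each frame
    plt.show()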
123  Things/Ant_Colony/visualization/renderer.py  Regular file (new)
@@ -0,0 +1,123 @@
"""
Visualization module for the ant colony simulation using matplotlib.
"""

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Wedge
from matplotlib.collections import PatchCollection
import matplotlib.animation as animation

class SimulationRenderer:
    """Handles visualization of the ant colony simulation."""

    def __init__(self, config: dict):
        """Initialize the renderer with configuration settings."""
        self.config = config
        self.viz_config = config['visualization']
        self.env_size = config['environment']['size']

        # Setup the figure and axis
        self.fig, self.ax = plt.subplots(figsize=(10, 10))
        self.ax.set_xlim(0, self.env_size[0])
        self.ax.set_ylim(0, self.env_size[1])
        self.ax.set_aspect('equal')

        # Initialize collections for different elements
        self.agent_patches = []
        self.food_patches = []
        self.obstacle_patches = []
        self.pheromone_plots = {}

        # Setup the nest
        nest_loc = config['environment']['nest_location']
        nest = Circle(nest_loc, 5, color=self.viz_config['colors']['nest'])
        self.ax.add_patch(nest)

    def update(self, world_state: dict) -> None:
        """Update the visualization with current world state."""
        # Clear previous patches
        for patch in self.agent_patches + self.food_patches + self.obstacle_patches:
            patch.remove()
        self.agent_patches.clear()
        self.food_patches.clear()
        self.obstacle_patches.clear()

        # Update pheromones
        for p_type, grid in world_state['pheromones'].items():
            if p_type not in self.pheromone_plots:
                color = self.viz_config['colors']['pheromones'][p_type]
                self.pheromone_plots[p_type] = self.ax.imshow(
                    grid,
                    extent=[0, self.env_size[0], 0, self.env_size[1]],
                    cmap='Greens' if p_type == 'food' else 'Reds',
                    alpha=0.3,
                    vmin=0,
                    vmax=1
                )
            else:
                self.pheromone_plots[p_type].set_array(grid)

        # Draw agents
        for agent in world_state['agents']:
            color = self.viz_config['colors']['agents'][agent.current_task.value]
            agent_patch = self._create_agent_patch(agent, color)
            self.ax.add_patch(agent_patch)
            self.agent_patches.append(agent_patch)

        # Draw food sources
        for food in world_state['resources']:
            food_patch = Circle(
                (food.position.x, food.position.y),
                food.size,
                color=self.viz_config['colors']['food']
            )
            self.ax.add_patch(food_patch)
            self.food_patches.append(food_patch)

        # Draw obstacles
        for obstacle in world_state['obstacles']:
            obstacle_patch = Circle(
                (obstacle.position.x, obstacle.position.y),
                obstacle.size,
                color=self.viz_config['colors']['obstacles']
            )
            self.ax.add_patch(obstacle_patch)
            self.obstacle_patches.append(obstacle_patch)

        # Trigger redraw
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()

    def _create_agent_patch(self, agent, color: str) -> Wedge:
        """Create a wedge patch to represent an agent."""
        # Create a wedge shape to show orientation
        radius = 1.0
        angle = np.degrees(agent.position.theta)
        wedge = Wedge(
            (agent.position.x, agent.position.y),
            radius,
            angle - 30,  # 60 degree wide wedge
            angle + 30,
            color=color
        )
        return wedge

    def save_animation(self, frames: list, filename: str) -> None:
        """Save the simulation as an animation."""
        anim = animation.ArtistAnimation(
            self.fig,
            frames,
            interval=50,
            blit=True,
            repeat=False
        )
        anim.save(filename, writer='pillow')

    def show(self) -> None:
        """Display the current visualization."""
        plt.show()

    def close(self) -> None:
        """Close the visualization window."""
        plt.close(self.fig)
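A rough usage sketch for `SimulationRenderer`; the `world_state` keys and the config layout below are inferred from `update()` and `__init__` above, and are assumptions rather than a documented interface.

    config = {
        'visualization': {'colors': {
            'nest': 'saddlebrown', 'food': 'gold', 'obstacles': 'gray',
            'pheromones': {'food': 'green', 'home': 'red'},
            'agents': {'foraging': '#2ecc71', 'maintenance': '#3498db',
                       'nursing': '#9b59b6', 'defense': '#e74c3c',
                       'exploration': '#f1c40f'},
        }},
        'environment': {'size': (100, 100), 'nest_location': (50, 50)},
    }
    renderer = SimulationRenderer(config)
    # An empty world_state draws just the nest; real states come from the environment and colony.
    renderer.update({'pheromones': {}, 'agents': [], 'resources': [], 'obstacles': []})
    renderer.show()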
The remaining additions in this commit are the auto-generated contents of the committed Python 3.11 virtual environment under `ant_colony_env/` (venv activation scripts, pip/setuptools entry-point wrappers, interpreter symlinks, and the fontTools `ttx` manual page):

247  ant_colony_env/bin/Activate.ps1        Regular file (PowerShell venv activation script)
 69  ant_colony_env/bin/activate            Regular file (bash/zsh venv activation script)
 26  ant_colony_env/bin/activate.csh        Regular file (csh venv activation script)
 69  ant_colony_env/bin/activate.fish       Regular file (fish venv activation script)
 33  ant_colony_env/bin/ant-colony          Executable file (console_scripts entry point for 'ant-colony')
  8  ant_colony_env/bin/f2py                Executable file
  8  ant_colony_env/bin/fonttools           Executable file
  8  ant_colony_env/bin/numpy-config        Executable file
  8  ant_colony_env/bin/pip                 Executable file
  8  ant_colony_env/bin/pip3                Executable file
  8  ant_colony_env/bin/pip3.11             Executable file
  8  ant_colony_env/bin/pyftmerge           Executable file
  8  ant_colony_env/bin/pyftsubset          Executable file
  1  ant_colony_env/bin/python              Symbolic link -> python3
  1  ant_colony_env/bin/python3             Symbolic link -> /usr/bin/python3
  1  ant_colony_env/bin/python3.11          Symbolic link -> python3
  8  ant_colony_env/bin/ttx                 Executable file
  1  ant_colony_env/lib64                   Symbolic link -> lib
  5  ant_colony_env/pyvenv.cfg              Regular file (home = /usr/bin, include-system-site-packages = false, version = 3.11.2)
225  ant_colony_env/share/man/man1/ttx.1    Regular file (fontTools ttx(1) manual page)