M .gitignore => .gitignore +3 -0
@@ 1,3 1,6 @@
*.png
*.svg
target
+solutions
+solutions.bcp
+solutions.bcp2
M codes/README.md => codes/README.md +5 -3
@@ 37,10 37,12 @@ The name of the plotter is same as used for hw01 as the plotting itself is the s
To obtain the graphs in the report, use the following sequence of commands:
1. `compute.sh` to compute all the algorithms on all instances, 10 times for each instance.
-2. Generate the plots, go to `tsp_plotter` folder, and run
+2. To generate the plots, go to the `py_plotter` folder and run
```
-cargo build --release
-# TODO
+python3 ./plotter.py config_feasible_g11.json
+python3 ./plotter.py config_feasible_g06.json
+python3 ./plotter.py config_feasible_g05.json
+python3 ./plotter.py config_best_g09.json
```
Now all the csv solutions are in `constr_hw02/solutions` and all the plots are in
M codes/compute.sh => codes/compute.sh +3 -4
@@ 2,12 2,11 @@
set -euxo pipefail
-# algorithms=("srank" "nsga" "nsga_multi" "nsga_multi_noncapped")
-algorithms=("nsga_multi_noncapped")
+algorithms=("srank" "nsga" "nsga_multi" "nsga_constr" "nsga_improved")
-instances=("g06" "g08" "g11" "g24")
+instances=("g04" "g05" "g06" "g08" "g09" "g11" "g24")
-repetitions="10"
+repetitions="20"
(cd constr_hw02 && cargo build --release)
M codes/constr_hw02/src/main.rs => codes/constr_hw02/src/main.rs +60 -9
@@ 139,11 139,23 @@ pub fn solve_with_nsga_ii<const DIM: usize, const CONSTRAINTS: usize>(
// Setup components
let mut pairing = AdjacentPairing::new();
- let mut crossover = ArithmeticCrossover::new();
+ let crossover = ArithmeticCrossover::new();
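+    // Enforce problem bounds: retry the crossover up to 5 times, then clamp (per the report)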
+ let mut crossover = BoundedCrossover::<nalgebra::Const<DIM>, 2, _>::new(
+ crossover,
+ problem.bounds.0,
+ problem.bounds.1,
+ BoundedCrossoverStrategy::Retry(5)
+ );
// Setup bounded random distribution perturbation with Normal distribution
let normal_perturbation = RandomDistributionPerturbation::<DIM, Normal<f64>>::normal(mutation_std_dev)?;
- let mut mutation = MutationPerturbation::new(Box::new(normal_perturbation), 0.1);
+ let perturbation = BoundedPerturbation::new(
+ normal_perturbation,
+ problem.bounds.0,
+ problem.bounds.1,
+ BoundedPerturbationStrategy::Retry(5)
+ );
+ let mut mutation = MutationPerturbation::new(Box::new(perturbation), 0.1);
let better_than = MinimizingOperator::new();
@@ 245,9 257,21 @@ pub fn solve_with_nsga_multi<const DIM: usize, const CONSTRAINTS: usize, const C
// Setup components
let mut pairing = AdjacentPairing::new();
- let mut crossover = ArithmeticCrossover::new();
+ let crossover = ArithmeticCrossover::new();
+ let mut crossover = BoundedCrossover::<nalgebra::Const<DIM>, 2, _>::new(
+ crossover,
+ bounds.0,
+ bounds.1,
+ BoundedCrossoverStrategy::Retry(5)
+ );
let normal_perturbation = RandomDistributionPerturbation::<DIM, Normal<f64>>::normal(mutation_std_dev)?;
- let mut mutation = MutationPerturbation::new(Box::new(normal_perturbation), 0.1);
+ let perturbation = BoundedPerturbation::new(
+ normal_perturbation,
+ bounds.0,
+ bounds.1,
+ BoundedPerturbationStrategy::Retry(5)
+ );
+ let mut mutation = MutationPerturbation::new(Box::new(perturbation), 0.1);
let better_than = MinimizingOperator::new();
// Create objectives: fitness + individual constraints using cloned problem
@@ 345,11 369,23 @@ pub fn solve_with_nsga_constr<const DIM: usize, const CONSTRAINTS: usize>(
// Setup components
let mut pairing = AdjacentPairing::new();
- let mut crossover = ArithmeticCrossover::new();
+ let crossover = ArithmeticCrossover::new();
+ let mut crossover = BoundedCrossover::<nalgebra::Const<DIM>, 2, _>::new(
+ crossover,
+ problem.bounds.0,
+ problem.bounds.1,
+ BoundedCrossoverStrategy::Retry(5)
+ );
// Setup bounded random distribution perturbation with Normal distribution
let normal_perturbation = RandomDistributionPerturbation::<DIM, Normal<f64>>::normal(mutation_std_dev)?;
- let mut mutation = MutationPerturbation::new(Box::new(normal_perturbation), 0.1);
+ let perturbation = BoundedPerturbation::new(
+ normal_perturbation,
+ problem.bounds.0,
+ problem.bounds.1,
+ BoundedPerturbationStrategy::Retry(5)
+ );
+ let mut mutation = MutationPerturbation::new(Box::new(perturbation), 0.1);
let better_than = MinimizingOperator::new();
@@ 463,19 499,34 @@ pub fn solve_with_nsga_improved<const DIM: usize, const CONSTRAINTS: usize>(
// Setup components
let mut pairing = AdjacentPairing::new();
- // Create the wrapped crossover with arithmetic crossover inside
+ // Create bounded crossover first, then wrap it
+ let arithmetic_crossover = ArithmeticCrossover::new();
+ let bounded_crossover = BoundedCrossover::<nalgebra::Const<DIM>, 2, _>::new(
+ arithmetic_crossover,
+ problem.bounds.0,
+ problem.bounds.1,
+ BoundedCrossoverStrategy::Retry(5)
+ );
+
+ // Create the wrapped crossover with bounded crossover inside
let mut wrapped_crossover = FeasibleCrossoverWrapper {
p_single_replaced: P_SINGLE_REPLACED,
p_double_first_replaced: P_DOUBLE_FIRST_REPLACED,
p_double_second_replaced: P_DOUBLE_SECOND_REPLACED,
archived_count: ARCHIVE_SIZE,
archived_population: Vec::new(),
- crossover: ArithmeticCrossover::new(),
+ crossover: bounded_crossover,
};
// Setup bounded random distribution perturbation with Normal distribution
let normal_perturbation = RandomDistributionPerturbation::<DIM, Normal<f64>>::normal(mutation_std_dev)?;
- let mut mutation = MutationPerturbation::new(Box::new(normal_perturbation), 0.1);
+ let perturbation = BoundedPerturbation::new(
+ normal_perturbation,
+ problem.bounds.0,
+ problem.bounds.1,
+ BoundedPerturbationStrategy::Retry(5)
+ );
+ let mut mutation = MutationPerturbation::new(Box::new(perturbation), 0.1);
let better_than = MinimizingOperator::new();
M codes/py_plotter/config_example.json => codes/py_plotter/config_example.json +2 -2
@@ 28,8 28,8 @@
],
"instances": [
{
- "name": "g24",
- "label": "G24",
+ "name": "g11",
+ "label": "G11",
"color": "#d62728"
}
],
A codes/py_plotter/config_target_proximity_1_percent.json => codes/py_plotter/config_target_proximity_1_percent.json +53 -0
@@ 0,0 1,53 @@
+{
+ "data_path": "../constr_hw02/solutions",
+ "output_dir": "plots",
+ "plot_name": "1_percent_over_time",
+
+ "target": 1.0,
+
+ "problem_groups": [
+ ["g05", "g06", "g09", "g11"]
+ ],
+
+ "algorithms": [
+ {
+ "name": "srank",
+ "label": "S-Rank",
+ "color": "#1f77b4",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga",
+ "label": "NSGA-II",
+ "color": "#ff7f0e",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga_multi",
+ "label": "NSGA-II Multi",
+ "color": "#2ca02c",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga_improved",
+ "label": "NSGA-II Improved",
+ "color": "#ff7f0e",
+ "linestyle": "--"
+ },
+ {
+ "name": "nsga_constr",
+ "label": "NSGA-II Constr",
+ "color": "#ff7f0e",
+ "linestyle": ":"
+ }
+ ],
+
+ "plot_settings": {
+ "figsize": [10, 6],
+ "xlabel": "Function Evaluations",
+ "ylabel": "Fraction of Runs Within 1% of Optimal",
+ "title": "Target Proximity Achievement Over Time (1% Target)",
+ "grid": true,
+ "legend": true
+ }
+}
\ No newline at end of file
A codes/py_plotter/config_target_proximity_comprehensive.json => codes/py_plotter/config_target_proximity_comprehensive.json +56 -0
@@ 0,0 1,56 @@
+{
+ "data_path": "../constr_hw02/solutions",
+ "output_dir": "plots",
+ "plot_name": "comprehensive_all_instances",
+
+ "targets": [0.1, 0.5, 1.0, 5.0, 10.0],
+
+ "problem_groups": [
+ ["g04", "g05", "g06", "g08", "g09", "g11", "g21", "g24"]
+ ],
+
+ "algorithms": [
+ {
+ "name": "srank",
+ "label": "S-Rank",
+ "color": "#1f77b4",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga",
+ "label": "NSGA-II",
+ "color": "#ff7f0e",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga_multi",
+ "label": "NSGA-II Multi",
+ "color": "#2ca02c",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga_improved",
+ "label": "NSGA-II Improved",
+ "color": "#ff7f0e",
+ "linestyle": "--"
+ },
+ {
+ "name": "nsga_constr",
+ "label": "NSGA-II Constr",
+ "color": "#ff7f0e",
+ "linestyle": ":"
+ }
+ ],
+
+ "plot_settings": {
+ "figsize": [10, 6],
+ "xlabel": "Function Evaluations",
+ "ylabel": "Probability of Success",
+ "title": "Probability of success over all problems and instances",
+ "grid": true,
+ "legend": true,
+ "log_x": true,
+ "show_std": true,
+ "alpha_fill": 0.2
+ }
+}
A codes/py_plotter/config_target_proximity_comprehensive_no_std.json => codes/py_plotter/config_target_proximity_comprehensive_no_std.json +55 -0
@@ 0,0 1,55 @@
+{
+ "data_path": "../constr_hw02/solutions",
+ "output_dir": "plots",
+ "plot_name": "comprehensive_all_instances_no_std",
+
+ "targets": [0.1, 0.5, 1.0, 5.0, 10.0],
+
+ "problem_groups": [
+ ["g04", "g05", "g06", "g08", "g09", "g11", "g21", "g24"]
+ ],
+
+ "algorithms": [
+ {
+ "name": "srank",
+ "label": "S-Rank",
+ "color": "#1f77b4",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga",
+ "label": "NSGA-II",
+ "color": "#ff7f0e",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga_multi",
+ "label": "NSGA-II Multi",
+ "color": "#2ca02c",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga_improved",
+ "label": "NSGA-II Improved",
+ "color": "#ff7f0e",
+ "linestyle": "--"
+ },
+ {
+ "name": "nsga_constr",
+ "label": "NSGA-II Constr",
+ "color": "#ff7f0e",
+ "linestyle": ":"
+ }
+ ],
+
+ "plot_settings": {
+ "figsize": [10, 6],
+ "xlabel": "Function Evaluations",
+ "ylabel": "Probability of Success",
+ "title": "Probability of success over all problems and instances",
+ "grid": true,
+ "legend": true,
+ "log_x": true,
+ "show_std": false
+ }
+}
\ No newline at end of file
A codes/py_plotter/config_target_proximity_example.json => codes/py_plotter/config_target_proximity_example.json +54 -0
@@ 0,0 1,54 @@
+{
+ "data_path": "../constr_hw02/solutions",
+ "output_dir": "plots",
+ "plot_name": "example_5_percent_over_time",
+
+ "target": 5.0,
+
+ "problem_groups": [
+ ["g05", "g06"],
+ ["g09", "g11"]
+ ],
+
+ "algorithms": [
+ {
+ "name": "srank",
+ "label": "S-Rank",
+ "color": "#1f77b4",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga",
+ "label": "NSGA-II",
+ "color": "#ff7f0e",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga_multi",
+ "label": "NSGA-II Multi",
+ "color": "#2ca02c",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga_improved",
+ "label": "NSGA-II Improved",
+ "color": "#ff7f0e",
+ "linestyle": "--"
+ },
+ {
+ "name": "nsga_constr",
+ "label": "NSGA-II Constr",
+ "color": "#ff7f0e",
+ "linestyle": ":"
+ }
+ ],
+
+ "plot_settings": {
+ "figsize": [10, 6],
+ "xlabel": "Function Evaluations",
+ "ylabel": "Fraction of Runs Within 5% of Optimal",
+ "title": "Target Proximity Achievement Over Time",
+ "grid": true,
+ "legend": true
+ }
+}
\ No newline at end of file
A codes/py_plotter/config_target_proximity_multi.json => codes/py_plotter/config_target_proximity_multi.json +54 -0
@@ 0,0 1,54 @@
+{
+ "data_path": "../constr_hw02/solutions",
+ "output_dir": "plots",
+ "plot_name": "multi_targets_averaged",
+
+ "targets": [1.0, 2.0, 5.0, 10.0, 15.0],
+
+ "problem_groups": [
+ ["g05", "g06", "g09", "g11"]
+ ],
+
+ "algorithms": [
+ {
+ "name": "srank",
+ "label": "S-Rank",
+ "color": "#1f77b4",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga",
+ "label": "NSGA-II",
+ "color": "#ff7f0e",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga_multi",
+ "label": "NSGA-II Multi",
+ "color": "#2ca02c",
+ "linestyle": "-"
+ },
+ {
+ "name": "nsga_improved",
+ "label": "NSGA-II Improved",
+ "color": "#ff7f0e",
+ "linestyle": "--"
+ },
+ {
+ "name": "nsga_constr",
+ "label": "NSGA-II Constr",
+ "color": "#ff7f0e",
+ "linestyle": ":"
+ }
+ ],
+
+ "plot_settings": {
+ "figsize": [10, 6],
+ "xlabel": "Function Evaluations (log scale)",
+ "ylabel": "Average Fraction of Runs Within Target",
+ "title": "Target Proximity Achievement (Averaged: 1%, 2%, 5%, 10%, 15%)",
+ "grid": true,
+ "legend": true,
+ "log_x": true
+ }
+}
\ No newline at end of file
A codes/py_plotter/target_proximity_plotter.py => codes/py_plotter/target_proximity_plotter.py +329 -0
@@ 0,0 1,329 @@
+#!/usr/bin/env python3
+
+import json
+import csv
+import matplotlib
+matplotlib.use('Agg') # Use non-interactive backend
+import matplotlib.pyplot as plt
+import numpy as np
+from pathlib import Path
+import glob
+import argparse
+
+class TargetProximityPlotter:
+ def __init__(self, config_path):
+ with open(config_path, 'r') as f:
+ self.config = json.load(f)
+
+ self.data_path = Path(self.config['data_path'])
+ self.output_dir = Path(self.config['output_dir'])
+ self.output_dir.mkdir(exist_ok=True)
+
+ # Load objectives for percentage deviation calculation
+ objectives_path = Path(__file__).parent / 'objectives.json'
+ with open(objectives_path, 'r') as f:
+ self.objectives = json.load(f)
+
+ def calculate_percentage_deviation(self, values, instance_name):
+ """Calculate percentage deviation from optimal value - reused from original plotter"""
+ if instance_name not in self.objectives:
+ raise ValueError(f"No objective value found for instance {instance_name}")
+
+ optimal_value = self.objectives[instance_name]
+
+ # Check if any values are significantly better than the known optimum
+ tolerance = 1e-4 * np.abs(optimal_value)
+ significantly_better = values < (optimal_value - tolerance)
+
+ if np.any(significantly_better):
+ better_indices = np.where(significantly_better)[0]
+ best_found = np.min(values[better_indices])
+ improvement = optimal_value - best_found
+ improvement_pct = improvement / np.abs(optimal_value) * 100
+
+ print(f"WARNING: Found {np.sum(significantly_better)} values better than known optimum for {instance_name}!")
+ print(f"Known optimum: {optimal_value}")
+ print(f"Best found: {best_found}")
+ print(f"Improvement: {improvement} ({improvement_pct:.3f}%)")
+ print(f"Using best found value as new reference point.")
+
+ # Update the optimal value to the best found for this calculation
+ optimal_value = best_found
+
+ new_optimal_value = optimal_value
+ if optimal_value < 0:
+ new_optimal_value = -optimal_value
+ values = values + 2*new_optimal_value
+
+ # Calculate percentage deviation: (current - optimal) / |optimal| * 100
+ percentage_deviations = (values - new_optimal_value) / new_optimal_value * 100
+ return percentage_deviations
+
+ def load_runs_for_algorithm_instance(self, algorithm, instance):
+ """Load all individual runs for an algorithm-instance combination"""
+ algorithm_path = self.data_path / algorithm / instance
+ csv_files = list(algorithm_path.glob('best_candidates_*.csv'))
+
+ if not csv_files:
+ print(f"Warning: No CSV files found for {algorithm}/{instance}")
+ return None
+
+ print(f"Found {len(csv_files)} files for {algorithm}/{instance}")
+
+ all_runs = []
+ for csv_file in csv_files:
+ try:
+ with open(csv_file, 'r') as f:
+ reader = csv.DictReader(f)
+ iterations = []
+ evaluations = []
+
+ for row in reader:
+ if 'iteration' in row and 'evaluation' in row:
+ iterations.append(float(row['iteration']))
+ evaluations.append(float(row['evaluation']))
+
+ if iterations and evaluations:
+ # Convert to percentage deviation
+ values = np.array(evaluations)
+ percentage_devs = self.calculate_percentage_deviation(values, instance)
+ run_data = {
+ 'iteration': np.array(iterations),
+ 'percentage_deviation': percentage_devs
+ }
+ all_runs.append(run_data)
+ except Exception as e:
+ print(f"Error reading {csv_file}: {e}")
+
+ return all_runs if all_runs else None
+
+ def calculate_target_proximity_over_time_for_algorithm_instance(self, algorithm, instance, target_pct):
+ """Calculate fraction of runs within target proximity at each iteration"""
+ runs = self.load_runs_for_algorithm_instance(algorithm, instance)
+ if not runs:
+ return None
+
+ num_runs = len(runs)
+
+ # Find common iteration grid across all runs (similar to original plotter)
+ all_iterations = set()
+ for run in runs:
+ all_iterations.update(run['iteration'].tolist())
+ common_grid = sorted(list(all_iterations))
+
+ # For each iteration, count how many runs are within target
+ fractions_over_time = []
+ iterations = []
+
+ for eval_point in common_grid:
+ within_target_count = 0
+
+ for run in runs:
+ # Find the best deviation achieved up to this evaluation point
+ mask = run['iteration'] <= eval_point
+ if np.any(mask):
+ best_deviation_so_far = np.min(run['percentage_deviation'][mask])
+ if best_deviation_so_far <= target_pct:
+ within_target_count += 1
+
+ fraction = within_target_count / num_runs
+ fractions_over_time.append(fraction)
+ iterations.append(eval_point)
+
+ return {
+ 'iterations': np.array(iterations),
+ 'fractions': np.array(fractions_over_time)
+ }
+
+ def calculate_algorithm_average_over_time(self, algorithm, problem_groups, target_pct):
+ """Calculate average target proximity over time across problem groups for one algorithm"""
+ all_group_data = []
+
+ for group in problem_groups:
+ group_data = []
+
+ for instance in group:
+ result = self.calculate_target_proximity_over_time_for_algorithm_instance(
+ algorithm, instance, target_pct
+ )
+ if result:
+ group_data.append(result)
+
+ if group_data:
+ all_group_data.extend(group_data)
+
+ if not all_group_data:
+ return None
+
+ # Find common iteration grid across all problems
+ all_iterations = set()
+ for data in all_group_data:
+ all_iterations.update(data['iterations'].tolist())
+ common_grid = sorted(list(all_iterations))
+
+ # Interpolate each problem's data to common grid and average
+ averaged_fractions = []
+
+ for eval_point in common_grid:
+ fractions_at_eval = []
+
+ for data in all_group_data:
+ # Find the fraction at this evaluation point (or the last known value)
+ mask = data['iterations'] <= eval_point
+ if np.any(mask):
+ last_known_fraction = data['fractions'][mask][-1]
+ fractions_at_eval.append(last_known_fraction)
+ else:
+ # Before any evaluations, fraction is 0
+ fractions_at_eval.append(0.0)
+
+ averaged_fractions.append(np.mean(fractions_at_eval))
+
+ return {
+ 'iterations': np.array(common_grid),
+ 'fractions': np.array(averaged_fractions)
+ }
+
+ def create_plot(self):
+ """Create the target proximity plot"""
+ fig, ax = plt.subplots(1, 1, figsize=self.config['plot_settings']['figsize'])
+
+ # Support both single target and multiple targets
+ if 'targets' in self.config:
+ targets = self.config['targets'] # Multiple targets
+ else:
+ targets = [self.config['target']] # Single target (backward compatibility)
+
+ problem_groups = self.config['problem_groups']
+
+ # First pass: collect all algorithm data for all targets to find global max iteration
+ all_algorithm_data = {}
+ global_max_iteration = 0
+
+ for algorithm in self.config['algorithms']:
+ alg_name = algorithm['name']
+ print(f"Processing algorithm: {alg_name}")
+
+ # Collect data for each target
+ target_data_list = []
+
+ for target_pct in targets:
+ # Get average results over time across problem groups for this target
+ alg_data = self.calculate_algorithm_average_over_time(alg_name, problem_groups, target_pct)
+
+ if alg_data:
+ target_data_list.append(alg_data)
+ global_max_iteration = max(global_max_iteration, alg_data['iterations'].max())
+
+ if target_data_list:
+ all_algorithm_data[alg_name] = target_data_list
+ else:
+ print(f"No data found for algorithm {alg_name}")
+
+ # Second pass: average across targets and extend all data to global max, then plot
+ for algorithm in self.config['algorithms']:
+ alg_name = algorithm['name']
+ alg_label = algorithm['label']
+ alg_color = algorithm.get('color', '#000000')
+ linestyle = algorithm.get('linestyle', '-')
+
+ if alg_name not in all_algorithm_data:
+ continue
+
+ target_data_list = all_algorithm_data[alg_name]
+
+ # Find common iteration grid across all targets for this algorithm
+ all_iterations = set()
+ for data in target_data_list:
+ all_iterations.update(data['iterations'].tolist())
+ all_iterations.add(global_max_iteration) # Include global max
+ common_grid = sorted(list(all_iterations))
+
+ # Average fractions across targets at each iteration point
+ averaged_fractions = []
+ std_fractions = []
+
+ for eval_point in common_grid:
+ fractions_at_eval = []
+
+ for data in target_data_list:
+ # Find the fraction at this evaluation point (or the last known value)
+ mask = data['iterations'] <= eval_point
+ if np.any(mask):
+ last_known_fraction = data['fractions'][mask][-1]
+ fractions_at_eval.append(last_known_fraction)
+ else:
+ # Before any evaluations, fraction is 0
+ fractions_at_eval.append(0.0)
+
+ averaged_fractions.append(np.mean(fractions_at_eval))
+ std_fractions.append(np.std(fractions_at_eval))
+
+ averaged_fractions = np.array(averaged_fractions)
+ std_fractions = np.array(std_fractions)
+
+ # Plot the averaged line
+ ax.plot(common_grid, averaged_fractions,
+ color=alg_color,
+ linestyle=linestyle,
+ label=alg_label,
+ linewidth=2,
+ drawstyle='steps-post') # Step plot like original plotter
+
+ # Add fill_between for standard deviation bands (if enabled)
+ if self.config['plot_settings'].get('show_std', False):
+ lower_bound = averaged_fractions - std_fractions
+ upper_bound = averaged_fractions + std_fractions
+
+ # Ensure bounds stay within [0, 1] range
+ lower_bound = np.maximum(lower_bound, 0.0)
+ upper_bound = np.minimum(upper_bound, 1.0)
+
+ alpha_fill = self.config['plot_settings'].get('alpha_fill', 0.2)
+ ax.fill_between(common_grid,
+ lower_bound,
+ upper_bound,
+ color=alg_color,
+ alpha=alpha_fill,
+ step='post')
+
+ # Configure plot
+ ax.set_xlabel(self.config['plot_settings']['xlabel'], fontsize=18)
+ ax.set_ylabel(self.config['plot_settings']['ylabel'], fontsize=18)
+ ax.set_title(self.config['plot_settings']['title'], fontsize=20)
+
+ # Set logarithmic x-axis if requested
+ if self.config['plot_settings'].get('log_x', False):
+ ax.set_xscale('log')
+
+ # Set tick label font sizes
+ ax.tick_params(axis='both', which='major', labelsize=16)
+ ax.tick_params(axis='both', which='minor', labelsize=14)
+
+ # Set y-axis to [-0.1, 1.1] range for better visibility of 0.0 and 1.0 values
+ ax.set_ylim(-0.1, 1.1)
+ ax.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f'{x:.1f}'))
+
+ if self.config['plot_settings'].get('grid', True):
+ ax.grid(True, alpha=0.3)
+
+ if self.config['plot_settings'].get('legend', True):
+ ax.legend(fontsize=14)
+
+ plt.tight_layout()
+
+ # Save plot
+ output_file = self.output_dir / f"target_proximity_{self.config['plot_name']}.svg"
+ plt.savefig(output_file, format='svg', bbox_inches='tight')
+ print(f"Plot saved to: {output_file}")
+
+def main():
+ parser = argparse.ArgumentParser(description='Plot target proximity analysis for constraint optimization results')
+ parser.add_argument('config', help='Path to JSON configuration file')
+ args = parser.parse_args()
+
+ plotter = TargetProximityPlotter(args.config)
+ plotter.create_plot()
+
+if __name__ == '__main__':
+ main()
A codes/py_plotter/test_target_proximity.py => codes/py_plotter/test_target_proximity.py +74 -0
@@ 0,0 1,74 @@
+#!/usr/bin/env python3
+
+import json
+import csv
+import numpy as np
+from pathlib import Path
+
+# Simple test to verify the target proximity logic works
+config = {
+ "data_path": "../constr_hw02/solutions",
+ "targets": [5.0, 10.0],
+ "problem_groups": [["g05", "g06"], ["g09", "g11"]],
+}
+
+objectives = {
+ "g04": -30665.53867178333,
+ "g05": 5126.4967140071,
+ "g06": -6961.8137558015,
+ "g08": -0.0958250414180359,
+ "g09": 680.6300573745,
+ "g11": 0.7499,
+ "g21": 193.724510070035,
+ "g24": -5.50801327159536
+}
+
+def calculate_percentage_deviation(values, instance_name):
+ """Calculate percentage deviation from optimal value"""
+ if instance_name not in objectives:
+ raise ValueError(f"No objective value found for instance {instance_name}")
+
+ optimal_value = objectives[instance_name]
+
+ new_optimal_value = optimal_value
+ if optimal_value < 0:
+ new_optimal_value = -optimal_value
+ values = values + 2*new_optimal_value
+
+ percentage_deviations = (values - new_optimal_value) / new_optimal_value * 100
+ return percentage_deviations
+
+# Test with a sample from nsga/g05
+data_path = Path(config["data_path"])
+sample_file = data_path / "nsga" / "g05" / "best_candidates_20251206_184606.csv"
+
+if sample_file.exists():
+ print(f"Testing with file: {sample_file}")
+
+ with open(sample_file, 'r') as f:
+ reader = csv.DictReader(f)
+ evaluations = []
+ for row in reader:
+ evaluations.append(float(row['evaluation']))
+
+ print(f"Found {len(evaluations)} evaluations")
+ print(f"First few values: {evaluations[:5]}")
+
+ # Calculate percentage deviations
+ values = np.array(evaluations)
+ deviations = calculate_percentage_deviation(values, "g05")
+
+ print(f"Percentage deviations: {deviations[:5]}...{deviations[-5:]}")
+ print(f"Final deviation: {deviations[-1]:.2f}%")
+
+ # Test target proximity
+ for target in [5.0, 10.0]:
+ within_target = deviations[-1] <= target
+ print(f"Within {target}% target: {within_target}")
+
+else:
+ print(f"Sample file not found: {sample_file}")
+ print("Available files:")
+ if (data_path / "nsga" / "g05").exists():
+ for f in (data_path / "nsga" / "g05").glob("*.csv"):
+            print(f"  {f.name}")
\ No newline at end of file
M codes/report.md => codes/report.md +112 -23
@@ 38,7 38,7 @@ Stochastic ranking is implemented in `eoa_lib/src/constraints.rs`.
## Unconstrained multi objective evolution (NSGA-II)
As a second method, NSGA-II has been used to solve the constrained problems.
-It's mainly meant to solve multi-objective problems. But the constraint problem
+It is mainly designed to solve multi-objective problems, but the constraint problem
can be mapped to it by making the original objective the first objective and
weighted sum of constraints as second objective.
It works based on non-dominated fronts.
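+
+As a sketch (with hypothetical `f` and `constraints` callables, not the actual Rust API
+in this repo), the mapping looks like this:
+
+```python
+def to_two_objectives(x, f, constraints):
+    """Map a constrained problem onto two NSGA-II objectives:
+    the original objective plus the summed constraint violation."""
+    # constraints are assumed in g(x) <= 0 form; positive values are violations
+    violation = sum(max(0.0, g(x)) for g in constraints)
+    return (f(x), violation)
+```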
@@ 133,12 133,17 @@ This is implemented along with NSGA-II at `src/eoa_lib/multi_objective_evolution
# Results
-Every configuration has been ran 10 times to accomodate for inherent randomness. The initialization
+Every configuration has been run 20 times to account for inherent randomness. The initialization
has been done randomly, uniformly within bounds given in the definitions of the problems.
-Later in the algorithm run, candidates could escape those bounds, they weren't penalized for it
-nor disqualified in the results. It has been tried to also bound the results of mutation and
-crossover, but it was abandoned as it seems the algorithms still stay within the bounds, because
-they contain better solutions. This gives more challenge to the algorithms.
+Later in the algorithm run, the bounds are also enforced, specifically for crossover and mutation.
+When mutation or crossover produces a result out of bounds, the operation is retried five times.
+If all retries produce results out of bounds, the result is forcibly bounded (clamped). This has
+been done because some problems, such as `g04`, were escaping the bounds and finding different
+optima, much lower than the originally reported ones. It has not been checked whether there
+really are better optima outside the bounds or whether a numerical instability causes this.
+None of the best candidate evaluations contain infeasible solutions. This is also why some
+of the graphs start only after a few function evaluations: a feasible solution had
+not been found until that evaluation.
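+
+A minimal Python sketch of this retry-then-clamp strategy (illustrative only; the actual
+implementation is the Rust `BoundedPerturbation`/`BoundedCrossover` wrappers in the
+`main.rs` diff above):
+
+```python
+import numpy as np
+
+def bounded_mutation(x, sigma, lower, upper, retries=5, rng=None):
+    """Retry a Gaussian perturbation up to `retries` times; clamp as a last resort."""
+    rng = rng or np.random.default_rng()
+    candidate = x
+    for _ in range(retries):
+        candidate = x + rng.normal(0.0, sigma, size=x.shape)
+        if np.all((candidate >= lower) & (candidate <= upper)):
+            return candidate
+    # Every retry escaped the box: forcibly bound the last candidate.
+    return np.clip(candidate, lower, upper)
+```
+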
For the stochastic ranking approach, the following parameters have been used:
tournament selection with 5 individuals and 0.95 probability
@@ 156,28 161,73 @@ and standard deviation given according to the following table:
- g09 - `1.0`
- g11 - `0.01` (epsilon is `0.00015`)
- g24 - `0.1`
-- g21 - `10.0`
-TODO parameters
-As for runtime parameters, population had 500 individuals, there are 1000 iterations,
-500 parents in each iteration. The number of iterations in stochastic ranking is 2 * population.
-Probability of comparing fitness in stochastic ranking is 0.45.
+As for runtime parameters, the population had 250 individuals, and each iteration
+generated 125 offspring out of 125 parents. There were 1250 iterations.
+The number of iterations in stochastic ranking is 2 * population size.
+The probability of comparing fitness in stochastic ranking is 0.45 for most problems.
+For g04 it was set to 0.65, because that led to significantly better results.
+For g05 it is 0.2, because with 0.45 usually no feasible solution was found.
-For NSGA-II
-TODO
+To get the percentage deviation from the optimal value, the formula had to be adjusted
+because of negative values. Specifically, when the optimal value is below zero and the
+values cross zero, the deviation would break down around zero. Because of that,
+in cases where the optimal value $o$ is negative, the data are shifted up by $-2 \cdot o$,
+so the new optimal value is $o_{n} = -o$ and the shifted data $x_{n}$ do not cross zero.
+The deviation is then $(x_{n} - o_{n}) / o_{n} \cdot 100\,\%$.
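+
+A short worked example (the g06 optimum below is the known value used by the plotters;
+the candidate evaluation is made up for illustration):
+
+```python
+o = -6961.8137558015   # known optimum of g06
+x = -6600.0            # a hypothetical candidate evaluation
+
+o_n = -o               # shifted optimum, 6961.81...
+x_n = x + 2 * o_n      # shift the data so they cannot cross zero
+
+deviation = (x_n - o_n) / o_n * 100
+print(f"{deviation:.2f} %")  # ~5.20 % above the optimum
+```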
+
+First, here is the percentage deviation for a chosen problem, specifically g09. This is one of the
+harder problems, using 7 variables and 4 constraints.
+
+{latex-placement="H"}
+
+The graph depicts the average values, and it can be observed that at the beginning the variance
+is very large, but it becomes more consistent in later iterations. That means the algorithms
+are working roughly as they should, preferring better candidates.
+
+\newpage
## Comparing the approaches
The following plot compares:
-- Stochastic ranking
+- Stochastic ranking (S-Rank)
- NSGA-II
-- NSGA-II with multiple objectives
-- NSGA-II customized archive
-- NSGA-II constrain-optimized tournament
-
-TODO add the plot
-TODO add discussion
+- NSGA-II with multiple objectives (NSGA-II Multi)
+- NSGA-II improved, with a customized archive (NSGA-II Improved)
+- NSGA-II with a constraint-optimized tournament (NSGA-II Constr)
+
+Multiple targets have been chosen: 0.1 %, 0.5 %, 1 %, 5 % and 10 %. For each target,
+the fraction of runs that have already reached it is tracked over function evaluations,
+and these fractions are averaged over all targets (see the sketch below).
+This has been collected from all used problems: g04, g05, g06, g08, g09, g11 and g24.
+Some of those problems are quite simple, using just 2D and 2 constraints,
+while some of them use more variables and more constraints.
+
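+The computation can be summarized by the following sketch; it is a compressed,
+illustrative version of what `target_proximity_plotter.py` does (each run is assumed
+to carry its running-best percentage deviation):
+
+```python
+import numpy as np
+
+def success_curve(runs, targets, grid):
+    """runs: list of (evals, best_dev) array pairs, best_dev being the running-best
+    percentage deviation. Returns, per grid point, the fraction of (run, target)
+    pairs that have already hit their target."""
+    first_hits = []
+    for evals, best_dev in runs:
+        for t in targets:
+            reached = evals[best_dev <= t]
+            first_hits.append(reached[0] if reached.size else np.inf)
+    first_hits = np.array(first_hits)
+    return np.array([(first_hits <= g).mean() for g in grid])
+```
+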
+{latex-placement="H"}
+
+As can be observed, stochastic ranking achieves lower success rates,
+similarly to NSGA-II Multi and NSGA-II Constr. This could be because of wrong parameter
+selection: it can be hard to balance the probability of comparing
+the objective rather than the constraints, as too high a value leads to no feasible
+solutions, whereas too low a value means the objective is not optimized as well.
+
+NSGA-II Multi is capable of catching up with S-Rank, despite the low
+number of feasible solutions it is able to find, as can be observed
+in the next section. The reason it behaves worse than plain NSGA-II could
+be the multiple objectives, where it tries to minimize all of them at once.
+That way it might happen that it minimizes mainly the objective function, but not
+the constraints, or one of the constraints and not the other(s).
+
+The improved NSGA-II seems to behave the best. This could be because it does
+not only prioritize feasible solutions; it prioritizes good feasible solutions.
+Once a good feasible solution has been found, there is much more potential
+for making it even better.
+
+NSGA-II Constr is capable of finding more feasible solutions than NSGA-II,
+at least early in the run (see the next section), but it produces
+worse results, suggesting that it prefers feasible solutions even when they are not
+as good. That is indeed what its domination rules imply (see the sketch below).
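+
+For reference, a sketch of the standard constrained-domination rule (Deb's rule) that
+the NSGA-II Constr tournament is based on; the exact Rust implementation may differ in
+details:
+
+```python
+def constrained_better(a, b):
+    """a, b: (objective, total_violation) tuples; violation 0 means feasible."""
+    (fa, va), (fb, vb) = a, b
+    if (va == 0) != (vb == 0):  # exactly one is feasible: it wins
+        return va == 0
+    if va > 0:                  # both infeasible: smaller violation wins
+        return va < vb
+    return fa < fb              # both feasible: better objective wins
+```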
## Comparing g11 with g06
@@ 190,7 240,43 @@ has been obtained, as well as average constraint violation.
Here is a plot with fraction of feasible solutions in iteration for both
problems:
-## Comparing TODO insert hard problem with g11
+## Comparing feasibility in the population
+
+One important metric is how many individuals in the population
+are actually feasible. Because this can differ quite a lot for different
+problems, two graphs are shown here, one per problem.
+
+Here is the graph for g06, one of the easier problems: a 2D function
+with two less-than inequality constraints.
+
+Then g11, which is also only 2D, but the constraint is an equality.
+Equality constraints are harder to satisfy, because the area of feasible solutions
+is smaller, and through mutation it is much more probable to leave the
+feasible area than to stay inside it.
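+
+To illustrate (using the standard form of g11, $h(x) = x_2 - x_1^2 = 0$, treated as
+feasible when $|h(x)| \le \varepsilon$ with $\varepsilon = 0.00015$ as listed above),
+a quick hypothetical check of how often a Gaussian mutant of a feasible point stays
+feasible:
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+eps = 0.00015                       # epsilon used for g11 above
+x = np.array([np.sqrt(0.5), 0.5])   # feasible point: x2 - x1^2 = 0
+
+mutants = x + rng.normal(0.0, 0.01, size=(100_000, 2))  # std dev 0.01, as for g11
+h = mutants[:, 1] - mutants[:, 0] ** 2
+print((np.abs(h) <= eps).mean())    # typically well under 1 % remain feasible
+```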
+
+\newpage
+
+{latex-placement="H"}
+{latex-placement="H"}
+
+\newpage
+
+As can be seen from the graphs, the NSGA-II Multi approach does not do so well;
+this could be because it tries to mostly optimize the original function and/or only one of the constraints.
+It can get more reward for doing so, and the third objective can be left above zero, leading
+to infeasible candidates. Still, it does produce some feasible candidates, as can be seen from the previous
+results; they just do not stay in the population for long.
+
+What is worth noting is plain NSGA-II. For g06 it is capable of reaching a population full of feasible candidates,
+but for g11 the feasible solutions oscillate and stay below half of the population.
+
+S-Rank converges to a given point, which is what we would expect: although it depends on randomness,
+in the end the swaps are applied many times, 'cancelling out' the randomness.
+
+Both the proposed improved NSGA-II and NSGA-II with constraint handling are capable of reaching a full population of feasible
+candidates. As for the proposed improvement, the population can sometimes lose feasible candidates because
+of the inherent randomness. NSGA-II with constraint dominance, on the other hand, stays at 100 %, presumably because
+feasible solutions are always prioritized over non-feasible ones by its rules.
# Tasks done above minimal requirements
@@ 200,7 286,7 @@ problems:
- 1 point - NSGA-II with modified tournament operator (constrained domination)
# Code structure
-Rust has been chosen as the language. There are four subdirectories, `eoa_lib`, `tsp_hw01`, `constr_hw02` and `tsp_plotter`.
+Rust has been chosen as the language. There are five subdirectories: `eoa_lib`, `tsp_hw01`, `constr_hw02`, `tsp_plotter` and `py_plotter`.
`eoa_lib` is the library with the generic operators defined, with random search, local search and evolution
algorithm functions. It also contains the most common representations, perturbations and crossovers for them.
@@ 208,10 294,13 @@ algorithm functions. It also contains the most common representations, perturbat
`constr_hw02` is the implementation of hw02. It contains the problem definitions and a simple CLI
program to run any of the problems with either stochastic ranking or with MOE.
-`tsp_plotter` plots the graphs. It can now be used forjboth homework.
`tsp_hw01` is the folder with the source of the first homework, kept for my convenience as I keep them in the same repo
and would otherwise have to change Cargo.toml to make sure the build doesn't fail if it's not present.
+`tsp_plotter` plots the graphs of hw01 only.
+
+`py_plotter` contains the Python plotter used for the second homework, chosen for simplicity. It uses
+numpy, pandas and matplotlib.
As part of the work, apart from the standard library, third party libraries have been used, namely (the list is the same as for hw01):
- nalgebra for working with vertices and matrices,
M manifest.scm => manifest.scm +5 -0
@@ 7,6 7,11 @@
"rust-analyzer"
"rust:cargo"
+ "python"
+ "python-matplotlib"
+ "python-numpy@1"
+ "python-pandas"
+
"pkg-config"
"fontconfig"
"freetype"))