nmoo.algorithms.ar_nsga2

Accumulative resampling algorithm wrapper for noisy single/multi-objective problems.
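
Below is a minimal usage sketch (not part of the module): `ARNSGA2` is a
drop-in `NSGA2` replacement, so it plugs into pymoo's usual `minimize`
workflow. The `ZDT1` import path follows pymoo 0.5/0.6-era conventions and
may differ in other versions; in practice the problem would typically be
wrapped in one of `nmoo`'s noise wrappers (a `WrappedProblem`), which is what
the resampling machinery is designed for.

    from pymoo.optimize import minimize
    from pymoo.problems.multi import ZDT1

    from nmoo.algorithms.ar_nsga2 import ARNSGA2

    problem = ZDT1()  # deterministic here; use a noisy WrappedProblem in practice
    algorithm = ARNSGA2(
        resampling_method="rate_on_conv",  # algorithm 2 in Fieldsend (2015)
        convergence_time_window=5,         # $m$ in the paper
        pop_size=20,
    )
    result = minimize(problem, algorithm, ("n_gen", 30), seed=42, verbose=False)
    print(result.F)  # maximum likelihood estimates of the Pareto front values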

  1"""
  2Accumulative resampling algorithm wrapper for noisy single/multi-objective
  3problems.
  4"""
  5__docformat__ = "google"
  6
  7from itertools import product
  8from typing import Dict, Iterable, List, Optional, Tuple, Union
  9
 10import numpy as np
 11from loguru import logger as logging
 12from pymoo.algorithms.moo.nsga2 import NSGA2
 13from pymoo.core.individual import Individual
 14from pymoo.core.population import Population
 15from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
 16
 17from nmoo.wrapped_problem import WrappedProblem
 18
 19
class TerminationCriterionMet(Exception):
    """
    Raised by `ARNSGA2._evaluate_individual` if the termination criterion has
    been met.
    """


class _Individual(Individual):
    """
    A [pymoo
    `Individual`](https://github.com/anyoptimization/pymoo/blob/master/pymoo/core/individual.py)
    whose attributes `F`, `G`, `dF`, `dG`, `ddF`, `ddG`, and `CV` are maximum
    likelihood estimates of the true values.
    """

    _sample_generations: List[int]
    """
    Generation numbers at which samples have been taken, i.e.
    `_sample_generations[i]` is the generation at which `_samples[*][i]` has
    been measured.
    """

    _samples: Dict[str, np.ndarray]

    generation_born: int
    """Generation at which this individual was born"""

    def __init__(self, generation_born: int, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self._sample_generations = []
        self._samples = {}
        self.generation_born = generation_born

    def get_estimate(
        self, key: str, at_generation: Optional[int] = None
    ) -> np.ndarray:
        """
        Returns the maximum likelihood estimate for the value of `key`, using
        the samples that were taken up to a given generation. If the
        generation limit is left to `None`, all samples are used. In that
        case, direct attribute access produces the same result, e.g.

            ind.get_estimate("F")
            # is equivalent to
            ind.F

        (assuming `ind` has been correctly `update`d).
        """
        samples = self._samples[key]
        if at_generation is not None:
            mask = np.array(self._sample_generations) <= at_generation
            samples = samples[mask]
        return samples.mean(axis=0)

    def update(self, current_generation: int) -> None:
        """
        Adds the current values of `F`, `G`, etc. to the sample arrays, and
        sets `F`, `G`, etc. back to the maximum likelihood estimates.

        When an evaluator `_eval`uates an individual, it overwrites its `F`,
        `G`, etc. directly using `Population.set`. The easiest way to adapt
        this workflow to our needs is to accept that `self.F` has a dual
        nature: it is either the latest evaluation (or sampling) of the
        objective function, or the maximum likelihood estimate.
        """
        for key in ["F", "G", "dF", "dG", "ddF", "ddG", "CV"]:
            value = self.__dict__.get(key)
            if not isinstance(value, np.ndarray):
                break
            if key not in self._samples:
                self._samples[key] = value[np.newaxis]
            else:
                self._samples[key] = np.append(
                    self._samples[key], [value], axis=0
                )
            self.__dict__[key] = self.get_estimate(key)
        self._sample_generations.append(current_generation)

    def n_eval(self, up_to_generation: Optional[int] = None) -> int:
        """
        The number of times this individual has been sampled up to a given
        generation. If left to `None`, all generations are considered.
        """
        if up_to_generation is None:
            return len(self._sample_generations)
        return len(
            list(
                filter(
                    lambda x: x <= up_to_generation,  # type: ignore
                    self._sample_generations,
                )
            )
        )


# pylint: disable=too-many-instance-attributes
class ARNSGA2(NSGA2):
    """
    Accumulative resampling algorithm wrapper for noisy single/multi-objective
    problems. The accumulative resampling methods are from [^elite].

    [^elite]: Fieldsend, J.E. (2015). Elite Accumulative Sampling Strategies
        for Noisy Multi-objective Optimisation. In: Gaspar-Cunha, A., Henggeler
        Antunes, C., Coello, C. (eds) Evolutionary Multi-Criterion
        Optimization. EMO 2015. Lecture Notes in Computer Science, vol 9019.
        Springer, Cham. https://doi.org/10.1007/978-3-319-15892-1_12
    """

    SUPPORTED_RESAMPLING_METHODS = [
        "elite",
        "fixed",
        "min_on_conv",
        "rate_on_conv",
    ]

    # pymoo inherited properties
    pop: Iterable[_Individual]
    n_gen: int

    _convergence_time_window: int
    """
    Convergence time window for methods 2 and 3 (denoted by $m$ in
    Fieldsend's paper).
    """

    _demo_crossover_probability: float
    """Differential evolution parameter"""

    _demo_scaling_factor: float
    """Differential evolution parameter"""

    _resampling_elite_cache: Dict[int, Tuple[float, int]] = {}
    """
    At key `t`, contains the average number of resamplings of Pareto
    individuals at generation `t`, and the size of the Pareto population at
    generation `t`. Used as a cache for `_resampling_elite`.
    """

    _resample_number: int = 1
    """
    Resample number for methods 2 and 3 (denoted by $k$ in Fieldsend's paper).
    """

    _resampling_method: str
    """Algorithm used for resampling. See `ARNSGA2.__init__`."""

    _rng: np.random.Generator

    def __init__(
        self,
        resampling_method: str = "fixed",
        convergence_time_window: int = 5,
        **kwargs,
    ):
        """
        Args:
            convergence_time_window (int): Convergence time window for methods
                2 and 3 (denoted by $m$ in [^elite]).
            resampling_method (str): Resampling method:
                * `fixed`: the resampling rate is fixed; corresponds to
                  algorithm 1 in [^elite];
                * `rate_on_conv`: the resampling rate may increase based on a
                  convergence assessment that uses the $\\varepsilon +$
                  indicator; corresponds to algorithm 2 in [^elite];
                * `min_on_conv`: the resampling rate *of elite members* may
                  increase based on a convergence assessment that uses the
                  $\\varepsilon +$ indicator; corresponds to algorithm 3 in
                  [^elite];
                * `elite`: resample counts of elite members increase over
                  time; corresponds to algorithm 4 in [^elite].
            **kwargs: Forwarded to `NSGA2.__init__`.

        [^elite]: Fieldsend, J.E. (2015). Elite Accumulative Sampling
            Strategies for Noisy Multi-objective Optimisation. In:
            Gaspar-Cunha, A., Henggeler Antunes, C., Coello, C. (eds)
            Evolutionary Multi-Criterion Optimization. EMO 2015. Lecture Notes
            in Computer Science, vol 9019. Springer, Cham.
            https://doi.org/10.1007/978-3-319-15892-1_12
        """
        super().__init__(**kwargs)
        if resampling_method not in self.SUPPORTED_RESAMPLING_METHODS:
            raise ValueError(
                "Invalid resampling method. Supported methods are "
                + ", ".join(self.SUPPORTED_RESAMPLING_METHODS)
            )
        self._resampling_method = resampling_method
        self._rng = np.random.default_rng()
        self._convergence_time_window = convergence_time_window

    def _do_resampling(self) -> None:
        """
        Dispatches to `_resampling_elite`, `_resampling_fixed`,
        `_resampling_min_on_conv` or `_resampling_rate_on_conv` depending on
        the value of `_resampling_method`. Also catches
        `TerminationCriterionMet` exceptions.
        """
        method = {
            "fixed": self._resampling_fixed,
            "rate_on_conv": self._resampling_rate_on_conv,
            "min_on_conv": self._resampling_min_on_conv,
            "elite": self._resampling_elite,
        }.get(self._resampling_method)
        if method is None:
            logging.warning(
                "Invalid resampling method {}", self._resampling_method
            )
            return
        try:
            for _ in range(self.n_offsprings):
                method()
        except TerminationCriterionMet:
            return

    def _evaluate_individual(self, individual: _Individual) -> None:
        """Evaluates and updates an individual."""
        if self.n_gen >= 2 and self.termination.has_terminated(self):
            raise TerminationCriterionMet()
        self.evaluator.eval(
            self.problem, individual, skip_already_evaluated=False
        )
        individual.update(self.n_gen)
        # Little hack so that WrappedProblems see this evaluation as part of
        # the same batch as this generation's infills
        problem = self.problem
        while isinstance(problem, WrappedProblem):
            problem._current_history_batch = self.n_gen
            problem._history["_batch"][-1] = self.n_gen
            problem = problem._problem

    def _non_dominated_sort(self, generation: int) -> List[np.ndarray]:
        """
        Returns the non-dominated sorting ranks (fronts) of the population at
        a given generation.
        """
        population = self._population_at_gen(generation)
        if len(population) == 0:
            return []
        sorter = NonDominatedSorting(method="efficient_non_dominated_sort")
        ranks = sorter.do(
            np.array(
                [
                    p.get_estimate("F", generation)
                    for p in population
                    if p.generation_born <= generation
                ]
            )
        )
        return ranks

    def _pareto_population(
        self, generation: Optional[int] = None
    ) -> List[_Individual]:
        """
        Returns the Pareto (aka elite) individuals among all individuals born
        at or before the given generation. By default, the current generation
        is used.
        """
        if generation is None:
            generation = self.n_gen
        population = self._population_at_gen(generation)
        ranks = self._non_dominated_sort(generation)
        return [p for i, p in enumerate(population) if i in ranks[0]]

    def _population_at_gen(self, generation: int) -> List[_Individual]:
        """
        Returns the list of all individuals born at or before the given
        generation.
        """
        return list(
            filter(lambda p: p.generation_born <= generation, self.pop)
        )

    def _reevaluate_individual_with_fewest_resamples(
        self, population: List[_Individual]
    ) -> None:
        """
        Randomly chooses an individual with the fewest resamples in the given
        population, and reevaluates it.
        """
        counts = np.array([p.n_eval() for p in population])
        index = self._rng.choice(np.where(counts == counts.min())[0])
        self._evaluate_individual(population[index])

    def _resampling_elite(self) -> None:
        """
        Resample counts of elite members increase over time. Corresponds to
        algorithm 4 in Fieldsend's paper.
        """

        def _mean_n_eval_pareto() -> float:
            """
            Average number of times an individual in the current Pareto
            population has been evaluated. This is called
            `mean_num_resamp(A_t)` in Fieldsend's paper.
            """
            return np.mean(
                [p.n_eval(self.n_gen) for p in self._pareto_population()]
            )

        pareto_population = self._pareto_population()
        arr = [p.n_eval(self.n_gen) for p in pareto_population]
        self._resampling_elite_cache[self.n_gen] = (
            np.mean(arr),
            len(arr),
        )
        self._reevaluate_individual_with_fewest_resamples(pareto_population)
        alpha = sum(
            [m * s for (m, s) in self._resampling_elite_cache.values()]
        ) / sum([s for (_, s) in self._resampling_elite_cache.values()])
        while _mean_n_eval_pareto() <= alpha:
            self._reevaluate_individual_with_fewest_resamples(
                self._pareto_population()
            )

    def _resampling_fixed(self) -> None:
        """
        Resampling rate is fixed. Corresponds to algorithm 1 in Fieldsend's
        paper.
        """
        self._reevaluate_individual_with_fewest_resamples(
            self._pareto_population()
        )

    def _resampling_min_on_conv(self) -> None:
        """
        Resampling rate *of elite members* may increase based on a convergence
        assessment that uses the $\\varepsilon +$ indicator. Corresponds to
        algorithm 3 in Fieldsend's paper.
        """
        # TODO: Deduplicate code
        # Generations m+1, 2m+1, 3m+1, etc., where
        # m = self._convergence_time_window
        if self.n_gen > 1 and self.n_gen % self._convergence_time_window == 1:
            p1 = self._pareto_population()
            p2 = self._pareto_population(
                self.n_gen - self._convergence_time_window
            )
            # It could be that no one from n_gen - _convergence_time_window is
            # still alive...
            if len(p2) != 0:
                a1 = extended_epsilon_plus_indicator(p1, p2)
                a2 = extended_epsilon_plus_indicator(p2, p1)
                if a1 > a2:
                    self._resample_number += 1
        self._reevaluate_individual_with_fewest_resamples(
            self._pareto_population()
        )
        while True:
            population = self._pareto_population()
            if min([p.n_eval() for p in population]) >= self._resample_number:
                break
            self._reevaluate_individual_with_fewest_resamples(population)

    def _resampling_rate_on_conv(self) -> None:
        """
        Resampling rate may increase based on a convergence assessment that
        uses the $\\varepsilon +$ indicator. Corresponds to algorithm 2 in
        Fieldsend's paper.
        """
        # Generations m+1, 2m+1, 3m+1, etc., where
        # m = self._convergence_time_window
        if self.n_gen > 1 and self.n_gen % self._convergence_time_window == 1:
            p1 = self._pareto_population()
            p2 = self._pareto_population(
                self.n_gen - self._convergence_time_window
            )
            # It could be that no one from n_gen - _convergence_time_window is
            # still alive...
            if len(p2) != 0:
                a1 = extended_epsilon_plus_indicator(p1, p2)
                a2 = extended_epsilon_plus_indicator(p2, p1)
                if a1 > a2:
                    self._resample_number += 1
        for _ in range(self._resample_number):
            self._reevaluate_individual_with_fewest_resamples(
                self._pareto_population(),
            )

    def _update_infills(
        self, infills: Optional[Union[_Individual, Iterable[_Individual]]]
    ) -> None:
        """
        Takes evaluated infills and calls `_Individual.update` on each of
        them.
        """
        if infills is None:
            raise ValueError(
                "ARNSGA2's _advance needs the current iteration's infills"
            )
        if isinstance(infills, _Individual):
            infills = [infills]
        for p in infills:
            p.update(self.n_gen)

    # pymoo overrides =========================================================

    def _advance(
        self,
        infills: Optional[Union[_Individual, Iterable[_Individual]]] = None,
        **kwargs,
    ) -> None:
        """
        Called after the infills (aka new individuals) have been evaluated.
        """
        self._update_infills(infills)
        super()._advance(infills, **kwargs)
        self._do_resampling()

    def _infill(self) -> Population:
        """
        Generates new individuals for the next generation by delegating
        mating to `NSGA2._infill`, and wraps them in `_Individual`s. The new
        individuals are not yet evaluated.
        """
        population = super()._infill()
        # When this method is called, self.n_gen has not yet been incremented
        # by Algorithm.advance!
        return Population.create(
            *[_Individual(self.n_gen + 1, X=p.X) for p in population]
        )

    def _initialize_advance(self, infills=None, **kwargs) -> None:
        """Only called after the first generation has been evaluated."""
        self._update_infills(infills)
        super()._initialize_advance(infills, **kwargs)
        self._do_resampling()

    def _initialize_infill(self) -> Population:
        """
        Only called to get the first generation. Subsequent generations are
        generated by calling `_infill`.
        """
        population = super()._initialize_infill()
        return Population.create(*[_Individual(1, X=p.X) for p in population])

    def _setup(self, problem, **kwargs) -> None:
        """Called before the algorithm starts running on a problem."""
        super()._setup(problem, **kwargs)
        self._rng = np.random.default_rng(kwargs.get("seed"))
        self._resampling_elite_cache = {}
        self._resample_number = 1


def extended_epsilon_plus_indicator(
    population_1: Iterable[Individual], population_2: Iterable[Individual]
) -> float:
    """
    Extended $\\varepsilon+$ indicator:
    $$
        I (A, B) = \\max_{a \\in A, \\ b \\in B, \\ i} F_i (b) - F_i (a)
    $$
    where $A$ and $B$ are two populations, and where $F_i$ is the $i$-th
    objective.
    """
    arr = map(
        lambda t: np.max(t[1].F - t[0].F), product(population_1, population_2)
    )
    return max(arr)
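
To make the convergence test concrete, here is a toy check of
`extended_epsilon_plus_indicator` (illustration only, not from the module).
The function only reads each individual's `F` attribute, so lightweight
stand-ins suffice here; inside the algorithm it receives pymoo `Individual`s.

    from types import SimpleNamespace

    import numpy as np

    from nmoo.algorithms.ar_nsga2 import extended_epsilon_plus_indicator

    a = [
        SimpleNamespace(F=np.array([0.0, 1.0])),
        SimpleNamespace(F=np.array([1.0, 0.0])),
    ]
    b = [SimpleNamespace(F=np.array([0.5, 1.5]))]

    # I(A, B) = max over a in A, b in B, and objectives i, of F_i(b) - F_i(a).
    # Here the largest gap is 1.5 - 0.0, from the second objective of the
    # second element of A against the single element of B.
    print(extended_epsilon_plus_indicator(a, b))  # 1.5

In `_resampling_rate_on_conv` and `_resampling_min_on_conv`, the two directed
values $I(A_t, A_{t-m})$ and $I(A_{t-m}, A_t)$ are compared every $m$
generations to decide whether to increment the resample number $k$.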