nmoo.denoisers.gpss

Gaussian process spectral sampling

  1"""Gaussian process spectral sampling"""
  2__docformat__ = "google"
  3
  4from typing import Any, Dict, Optional
  5
  6import numpy as np
  7from pymoo.core.problem import Problem
  8from gradient_free_optimizers import ParticleSwarmOptimizer
  9from scipy.special import erfinv
 10
 11from nmoo.wrapped_problem import WrappedProblem
 12
 13
 14# pylint: disable=too-many-instance-attributes
 15class GPSS(WrappedProblem):
 16    """
 17    Implementation of the gaussian process spectral sampling method described
 18    in [^tsemo]. Reference implementation:
 19    https://github.com/Eric-Bradford/TS-EMO/blob/master/TSEMO_V4.m .
 20
 21    [^tsemo] Bradford, E., Schweidtmann, A.M. & Lapkin, A. Efficient
 22        multiobjective optimization employing Gaussian processes, spectral
 23        sampling and a genetic algorithm. J Glob Optim 71, 407–438 (2018).
 24        https://doi.org/10.1007/s10898-018-0609-2
 25    """
 26
 27    _generator: np.random.Generator
 28    """Random number generator."""
 29
 30    _n_mc_samples: int
 31    """Number of Monte-Carlo samples"""
 32
 33    _nu: Optional[int]
 34    """Smoothness parameter of the Matérn covariance function $k$"""
 35
 36    _xi: np.ndarray
 37    """Hyperparameters. See `__init__`."""
 38
 39    _xi_map_search_n_iter: int
 40    """Number of iterations for the search of the MAP estimate of $\\xi$"""
 41
 42    _xi_map_search_exclude_percentile: float
 43    """Area where to **not** search for MAP estimate of $\\xi$"""
 44
 45    _xi_prior_mean: np.ndarray
 46    """Prior means of the components of $\\xi$"""
 47
 48    _xi_prior_std: np.ndarray
 49    """Prior variances of the components of $\\xi$"""
 50
 51    def __init__(
 52        self,
 53        problem: Problem,
 54        xi_prior_mean: np.ndarray,
 55        xi_prior_std: np.ndarray,
 56        nu: Optional[int] = None,
 57        n_mc_samples: int = 4000,
 58        xi_map_search_n_iter: int = 100,
 59        xi_map_search_exclude_percentile: float = 0.1,
 60        seed: Any = None,
 61        copy_problem: bool = True,
 62        name: str = "gpss",
 63    ):
 64        """
 65        Args:
 66            xi_prior_mean: Means of the (univariate) normal distributions that
 67                components of $\\xi$ are assumed to follow. Recall that $\\xi =
 68                [\\log \\lambda_1, \\ldots, \\log \\lambda_d, \\log \\sigma_f,
 69                \\log \\sigma_n]$, where $\\lambda_i$ is the length scale of
 70                input variable $i$, $d$ is the dimension of the input space
 71                (a.k.a. the number of variables), $\\sigma_f$ is the standard
 72                deviation of the output, and $\\sigma_n$ is the standard
 73                deviation of the noise. In particular, `xi_prior_mean` must
 74                have shape $(d+2,)$.
 75            xi_prior_std: Standard deviations of the (univariate) normal
 76                distributions that components of $\\xi$ are assumed to follow.
 77            n_mc_samples: Number of Monte-Carlo points for spectral sampling
 78            nu: $\\nu$ covariance smoothness parameter. Must currently be left
 79                to `None`.
 80            xi_map_search_n_iter: Number of iterations for the maximum à
 81                posteriori search for the $\\xi$ hyperparameter.
 82            xi_map_search_exclude_percentile: Percentile to **exclude** from
 83                the maximum à posteriori search for the $\\xi$ hyperparameter.
 84                For example, if left to $10%$, then the search will be confined
 85                to the $90%$ region centered around the mean. Should be in $(0,
 86                0.5)$.
 87        """
 88        super().__init__(problem, copy_problem=copy_problem, name=name)
 89        self.reseed(seed)
 90        if nu is not None:
 91            raise NotImplementedError(
 92                "The smoothness parameter nu must be left to None"
 93            )
 94        self._nu = nu
 95        self._n_mc_samples = n_mc_samples
 96        if xi_prior_mean.shape != (self.n_var + 2,):
 97            raise ValueError(
 98                "Invalid prior mean vector: it must have shape "
 99                f"(n_var + 2,), which in this case is ({self.n_var + 2},)."
100            )
101        if xi_prior_std.shape != (self.n_var + 2,):
102            raise ValueError(
103                "Invalid prior standard deviation vector: it must have shape "
104                f"(n_var + 2,), which in this case is ({self.n_var + 2},)."
105            )
106        if not (xi_prior_std > 0.0).all():
107            raise ValueError(
108                "Invalid prior standard deviation vector: it can only have "
109                "strictly positive components."
110            )
111        self._xi_prior_mean = xi_prior_mean
112        self._xi_prior_std = xi_prior_std
113        self._xi_map_search_n_iter = xi_map_search_n_iter
114        self._xi_map_search_exclude_percentile = (
115            xi_map_search_exclude_percentile
116        )
117        self._xi = np.array(
118            [
119                self._generator.normal(xi_prior_mean[i], xi_prior_std[i])
120                for i in range(self.n_var + 2)
121            ]
122        )
123
124    def reseed(self, seed: Any) -> None:
125        self._generator = np.random.default_rng(seed)
126        if isinstance(self._problem, WrappedProblem):
127            self._problem.reseed(seed)
128        super().reseed(seed)
129
    # pylint: disable=too-many-locals
    def _evaluate(self, x, out, *args, **kwargs):
        self._problem._evaluate(x, out, *args, **kwargs)

        self._xi = self._xi_map_search(x, out["F"])
        exp_xi = np.ma.exp(self._xi)
        f_std, n_std = exp_xi[-2:]

        # x: k x n_var, where k is the batch size
        # w: _n_mc_samples x n_var
        # w @ x.T: _n_mc_samples x k
        # b: _n_mc_samples x 1, broadcast to _n_mc_samples x k
        # zeta_x: _n_mc_samples x k
        # theta: _n_mc_samples x n_obj
        # out["F"]: k x n_obj
        # z = zeta_x.T: k x _n_mc_samples
        # zzi_inv: _n_mc_samples x _n_mc_samples
        # m: _n_mc_samples x n_obj
        # v: _n_mc_samples x _n_mc_samples
        # theta[:,i] ~ N(m[:,i], v): _n_mc_samples x 1
        lambda_mat = np.diag(exp_xi[:-2])
        w = self._generator.multivariate_normal(
            np.zeros(self.n_var), lambda_mat, size=self._n_mc_samples
        )
        # One phase offset per spectral feature, shared across the whole
        # batch (required for zeta to approximate the covariance function)
        b = self._generator.uniform(
            0, 2 * np.pi, size=(self._n_mc_samples, 1)
        )
        zeta_x = (
            f_std * np.sqrt(2 / self._n_mc_samples) * np.ma.cos(w.dot(x.T) + b)
        )
        z = zeta_x.T
        zzi_inv = np.linalg.inv(
            z.T.dot(z) + (n_std**2) * np.eye(self._n_mc_samples)
        )
        m = zzi_inv.dot(z.T).dot(out["F"])
        v = zzi_inv * (n_std**2)
        # Ensure v is symmetric, as in the reference implementation
        v = 0.5 * (v + v.T)
        theta = np.stack(
            [
                # The mean needs to be 1-dimensional. The Cholesky
                # decomposition is used for performance reasons
                self._generator.multivariate_normal(
                    m[:, i].flatten(), v, method="cholesky"
                )
                for i in range(self.n_obj)
            ],
            axis=-1,  # stacks columns instead of rows
        )
        out["F"] = z.dot(theta)
        self.add_to_history_x_out(x, out)

    def _xi_map_search(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
        """Maximum a posteriori estimate for $\\xi$."""

        def _objective_function(parameters: Dict[str, float]) -> float:
            xi = np.array(
                [parameters[f"xi_{i}"] for i in range(self._xi.shape[0])]
            )
            return _negative_log_likelihood(
                xi, self._xi_prior_mean, self._xi_prior_std, x, y
            )

        # q is the offset of the (percentile / 2)-quantile of each prior,
        # i.e. the lower end of the central (1 - percentile) region. Note
        # that q is negative since the argument of erfinv is in (-1, 0).
        q = (
            np.sqrt(2.0)
            * erfinv(self._xi_map_search_exclude_percentile - 1)
            * self._xi_prior_std
        )
        search_space = {
            f"xi_{i}": np.linspace(
                self._xi_prior_mean[i] + q[i],  # lower bound, since q < 0
                self._xi_prior_mean[i] - q[i],
                100,
            )
            for i in range(self._xi.shape[0])
        }
        optimizer = ParticleSwarmOptimizer(search_space)
        optimizer.search(
            _objective_function,
            n_iter=self._xi_map_search_n_iter,
            early_stopping={
                "n_iter_no_change": int(self._xi_map_search_n_iter / 5)
            },
            verbosity=[],
        )
        return np.array(
            [optimizer.best_para[f"xi_{i}"] for i in range(self._xi.shape[0])]
        )


def _negative_log_likelihood(
    xi: np.ndarray,
    xi_prior_mean: np.ndarray,
    xi_prior_std: np.ndarray,
    x: np.ndarray,
    y: np.ndarray,
) -> float:
    """
    Equation 15, but without the terms that do not depend on $\\xi$. Also,
    there is an issue with the $y^T \\Sigma^{-1} y$ term, which is not a
    scalar if the dimension of the output space is not $1$. To remedy this,
    we consider its $L^\\infty$-norm (i.e. the maximum absolute row sum)
    $\\Vert y^T \\Sigma^{-1} y \\Vert_\\infty$.
    """
    exp_xi = np.ma.exp(xi)
    f_std, n_std = exp_xi[-2:]
    # x: k x n_var, where k is the batch size
    # y: k x n_obj
    # lambda_mat: n_var x n_var
    # r_squared_mat: k x k
    # sigma_mat: k x k
    lambda_mat = np.diag(exp_xi[:-2])
    r_squared_mat = np.array(
        [[(a - b) @ lambda_mat @ (a - b).T for b in x] for a in x]
    )
    # Noise variance (not standard deviation) on the diagonal
    sigma_mat = (f_std**2) * np.ma.exp(-0.5 * r_squared_mat) + (
        n_std**2
    ) * np.eye(x.shape[0])
    sigma_mat = 0.5 * (sigma_mat + sigma_mat.T)
    sigma_det = np.linalg.det(sigma_mat)
    sigma_inv = np.linalg.inv(sigma_mat)
    y_sigma_y = y.T.dot(sigma_inv).dot(y)
    return (
        -0.5 * np.log(np.abs(sigma_det))
        - 0.5 * np.linalg.norm(y_sigma_y, np.inf)
        # - 0.5 * x.shape[0] * np.log(2.0 * np.pi)
        + np.sum(
            [
                # -0.5 * np.log(2.0 * np.pi)
                # - 0.5 * np.log(xi_prior_std[i] ** 2)
                -1.0
                / (2.0 * xi_prior_std[i] ** 2)
                * (xi[i] - xi_prior_mean[i]) ** 2
                # One prior term per component of xi (d + 2 in total)
                for i in range(xi.shape[0])
            ]
        )
    )
class GPSS(nmoo.wrapped_problem.WrappedProblem):

Implementation of the Gaussian process spectral sampling method described in [^tsemo]. Reference implementation: https://github.com/Eric-Bradford/TS-EMO/blob/master/TSEMO_V4.m .

[^tsemo]: Bradford, E., Schweidtmann, A.M. & Lapkin, A. Efficient multiobjective optimization employing Gaussian processes, spectral sampling and a genetic algorithm. J Glob Optim 71, 407–438 (2018). https://doi.org/10.1007/s10898-018-0609-2
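In short, each call to `_evaluate` fits a Bayesian linear model in a random Fourier feature space and replaces the noisy objective values with one Thompson sample of the resulting surrogate. The following display summarizes what the source above computes (a sketch; $m$ is `_n_mc_samples`, $Z$ is the $k \times m$ feature matrix `z` of the evaluated batch, $y$ is the noisy `out["F"]`, and $\mu$, $V$ correspond to `m` and `v` in the source):

    $$\zeta(x) = \sigma_f \sqrt{2/m} \, \cos(W x + b), \qquad W_j \sim \mathcal{N}(0, \Lambda), \quad b_j \sim \mathcal{U}[0, 2\pi)$$

    $$\theta \sim \mathcal{N}(\mu, V), \qquad \mu = (Z^T Z + \sigma_n^2 I)^{-1} Z^T y, \qquad V = \sigma_n^2 (Z^T Z + \sigma_n^2 I)^{-1}$$

where $\Lambda = \mathrm{diag}(\lambda_1, \ldots, \lambda_d)$, so that $\zeta(x)^T \zeta(x')$ approximates the covariance function $k(x, x')$, and the denoised objective values are $\hat{y} = Z \theta$.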

GPSS(problem: pymoo.core.problem.Problem, xi_prior_mean: numpy.ndarray, xi_prior_std: numpy.ndarray, nu: Optional[int] = None, n_mc_samples: int = 4000, xi_map_search_n_iter: int = 100, xi_map_search_exclude_percentile: float = 0.1, seed: Any = None, copy_problem: bool = True, name: str = 'gpss')
Arguments:
  • xi_prior_mean: Means of the (univariate) normal distributions that the components of $\xi$ are assumed to follow. Recall that $\xi = [\log \lambda_1, \ldots, \log \lambda_d, \log \sigma_f, \log \sigma_n]$, where $\lambda_i$ is the length scale of input variable $i$, $d$ is the dimension of the input space (a.k.a. the number of variables), $\sigma_f$ is the standard deviation of the output, and $\sigma_n$ is the standard deviation of the noise. In particular, xi_prior_mean must have shape $(d+2,)$.
  • xi_prior_std: Standard deviations of the (univariate) normal distributions that the components of $\xi$ are assumed to follow.
  • n_mc_samples: Number of Monte-Carlo points for spectral sampling.
  • nu: $\nu$ covariance smoothness parameter. Must currently be left to None.
  • xi_map_search_n_iter: Number of iterations for the maximum a posteriori search for the $\xi$ hyperparameter.
  • xi_map_search_exclude_percentile: Percentile to exclude from the maximum a posteriori search for the $\xi$ hyperparameter. For example, if set to the default $0.1$, the search is confined to the $90\%$ region centered around the prior mean. Should be in $(0, 0.5)$.
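A minimal construction sketch (the priors below are hypothetical placeholders; ZDT1 has $d = 30$ variables, so the prior vectors must have shape $(32,)$):

    import numpy as np
    from pymoo.problems import get_problem

    from nmoo.denoisers.gpss import GPSS

    problem = get_problem("zdt1")  # n_var = 30

    # Hypothetical priors on xi = [log lambda_1, ..., log lambda_30,
    # log sigma_f, log sigma_n]: standard normal on each component
    xi_prior_mean = np.zeros(problem.n_var + 2)
    xi_prior_std = np.ones(problem.n_var + 2)

    denoised = GPSS(
        problem,
        xi_prior_mean=xi_prior_mean,
        xi_prior_std=xi_prior_std,
        seed=42,
    )

In practice, the wrapped `problem` would typically itself be an nmoo noise wrapper, since GPSS acts as a denoiser on the values produced by the problem it wraps.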
def reseed(self, seed: Any) -> None:

Recursively resets the internal random state of the problem. See the numpy documentation for details about acceptable seeds.
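For example, continuing the hypothetical `denoised` wrapper from above:

    denoised.reseed(0)  # resets this wrapper's RNG and recursively reseeds wrapped problems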

Inherited Members
  nmoo.wrapped_problem.WrappedProblem
    add_to_history
    add_to_history_x_out
    all_layers
    depth
    dump_all_histories
    dump_history
    ground_problem
    innermost_wrapper
    start_new_run
  pymoo.core.problem.Problem
    evaluate
    do
    nadir_point
    ideal_point
    pareto_front
    pareto_set
    has_bounds
    has_constraints
    bounds
    name
    calc_constraint_violation