5 changes: 5 additions & 0 deletions docs/source/api/quad/solvers.acquisition_functions.rst
@@ -0,0 +1,5 @@
Acquisition Functions
---------------------
.. automodapi:: probnum.quad.solvers.acquisition_functions
:no-heading:
:headings: "*"
5 changes: 5 additions & 0 deletions docs/source/api/quad/solvers.rst
@@ -15,6 +15,11 @@ probnum.quad.solvers

solvers.policies

.. toctree::
:hidden:

solvers.acquisition_functions

.. toctree::
:hidden:

12 changes: 8 additions & 4 deletions src/probnum/quad/_bayesquad.py
Expand Up @@ -70,10 +70,11 @@ def bayesquad(
policy
Type of acquisition strategy to use. Defaults to 'bmc'. Options are

========================== =======
Bayesian Monte Carlo [2]_ ``bmc``
van Der Corput points ``vdc``
========================== =======
============================================ ===========
Bayesian Monte Carlo [2]_ ``bmc``
Van der Corput points                        ``vdc``
Uncertainty Sampling with random candidates ``us_rand``
============================================ ===========

initial_design
The type of initial design to use. If ``None`` is given, no initial design is
@@ -112,6 +113,9 @@ def bayesquad(
num_initial_design_nodes : Optional[IntLike]
The number of nodes created by the initial design. Defaults to
``input_dim * 5`` if an initial design is given.
us_rand_num_candidates : Optional[IntLike]
The number of candidate nodes used by the policy 'us_rand'. Defaults
to ``1e2``.

Returns
-------
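For orientation, a minimal usage sketch of the new ``us_rand`` option follows. It assumes that the public ``bayesquad`` call accepts the ``options`` dictionary and an ``rng`` argument in the way the docstring above suggests; the integrand, domain, and candidate count are purely illustrative.

.. code-block:: python

    # Hedged sketch: parameter names follow the docstring above; the exact way
    # ``options`` and ``rng`` are passed may differ in the released API.
    import numpy as np

    from probnum.quad import bayesquad


    def fun(x):
        # Integrand evaluated at nodes of shape (n_nodes, input_dim).
        return np.exp(-np.sum(x**2, axis=1))


    integral, info = bayesquad(
        fun=fun,
        input_dim=1,
        domain=(-3.0, 3.0),  # illustrative integration domain
        policy="us_rand",  # uncertainty sampling with random candidates
        rng=np.random.default_rng(0),  # assumed: random policies need an rng
        options={"us_rand_num_candidates": 200},  # overrides the ``1e2`` default
    )
    print(integral.mean)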
8 changes: 7 additions & 1 deletion src/probnum/quad/solvers/__init__.py
@@ -1,6 +1,12 @@
"""Bayesian quadrature methods and their components."""

from . import belief_updates, initial_designs, policies, stopping_criteria
from . import (
acquisition_functions,
belief_updates,
initial_designs,
policies,
stopping_criteria,
)
from ._bayesian_quadrature import BayesianQuadrature
from ._bq_state import BQIterInfo, BQState

20 changes: 18 additions & 2 deletions src/probnum/quad/solvers/_bayesian_quadrature.py
@@ -10,9 +10,15 @@
from probnum.quad.integration_measures import IntegrationMeasure, LebesgueMeasure
from probnum.quad.kernel_embeddings import KernelEmbedding
from probnum.quad.solvers._bq_state import BQIterInfo, BQState
from probnum.quad.solvers.acquisition_functions import WeightedPredictiveVariance
from probnum.quad.solvers.belief_updates import BQBeliefUpdate, BQStandardBeliefUpdate
from probnum.quad.solvers.initial_designs import InitialDesign, LatinDesign, MCDesign
from probnum.quad.solvers.policies import Policy, RandomPolicy, VanDerCorputPolicy
from probnum.quad.solvers.policies import (
Policy,
RandomMaxAcquisitionPolicy,
RandomPolicy,
VanDerCorputPolicy,
)
from probnum.quad.solvers.stopping_criteria import (
BQStoppingCriterion,
ImmediateStop,
@@ -38,7 +44,7 @@ class BayesianQuadrature:
Parameters
----------
kernel
The kernel used for the GP model.
The kernel used for the Gaussian process model.
measure
The integration measure.
policy
@@ -139,6 +145,9 @@ def from_problem(
num_initial_design_nodes : Optional[IntLike]
The number of nodes created by the initial design. Defaults to
``input_dim * 5`` if an initial design is given.
us_rand_num_candidates : Optional[IntLike]
The number of candidate nodes used by the policy 'us_rand'. Defaults
to ``1e2``.

Returns
-------
@@ -175,6 +184,7 @@ def from_problem(
num_initial_design_nodes = options.get(
"num_initial_design_nodes", int(5 * input_dim)
)
us_rand_num_candidates = options.get("us_rand_num_candidates", int(1e2))

# Set up integration measure
if domain is None and measure is None:
@@ -198,6 +208,12 @@
policy = RandomPolicy(batch_size, measure.sample)
elif policy == "vdc":
policy = VanDerCorputPolicy(batch_size, measure)
elif policy == "us_rand":
policy = RandomMaxAcquisitionPolicy(
batch_size=1,
acquisition_func=WeightedPredictiveVariance(),
n_candidates=us_rand_num_candidates,
)
else:
raise NotImplementedError(f"The given policy ({policy}) is unknown.")

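The ``us_rand`` branch above assembles the policy from its parts. Constructed directly (outside ``from_problem``), the same object would look roughly like this; the constructor arguments are taken from the branch above, the candidate count is illustrative.

.. code-block:: python

    from probnum.quad.solvers.acquisition_functions import WeightedPredictiveVariance
    from probnum.quad.solvers.policies import RandomMaxAcquisitionPolicy

    policy = RandomMaxAcquisitionPolicy(
        batch_size=1,  # the acquisition is maximized one node at a time
        acquisition_func=WeightedPredictiveVariance(),
        n_candidates=100,  # mirrors the ``us_rand_num_candidates`` default of 1e2
    )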
8 changes: 8 additions & 0 deletions src/probnum/quad/solvers/_bq_state.py
@@ -37,6 +37,8 @@ class BQState:
Function evaluations at nodes.
gram
The kernel Gram matrix.
gram_cho_factor
The output of ``BQBeliefUpdate.compute_gram_cho_factor``.
kernel_means
All kernel mean evaluations at ``nodes``.

@@ -55,6 +57,7 @@ def __init__(
nodes: Optional[np.ndarray] = None,
fun_evals: Optional[np.ndarray] = None,
gram: np.ndarray = np.array([[]]),
gram_cho_factor: Tuple[np.ndarray, bool] = (np.array([[]]), False),
kernel_means: np.ndarray = np.array([]),
):
self.measure = measure
@@ -73,6 +76,7 @@ def __init__(
self.fun_evals = fun_evals

self.gram = gram
self.gram_cho_factor = gram_cho_factor
self.kernel_means = kernel_means

@classmethod
@@ -85,6 +89,7 @@ def from_new_data(
integral_belief: Normal,
prev_state: "BQState",
gram: np.ndarray,
gram_cho_factor: Tuple[np.ndarray, bool],
kernel_means: np.ndarray,
) -> "BQState":
r"""Initialize state from updated data.
@@ -105,6 +110,8 @@ def from_new_data(
Previous state of the BQ loop.
gram
The Gram matrix of the given nodes.
gram_cho_factor
The output of ``BQBeliefUpdate.compute_gram_cho_factor`` for ``gram``.
kernel_means
The kernel means at the given nodes.

@@ -123,6 +130,7 @@ def from_new_data(
nodes=nodes,
fun_evals=fun_evals,
gram=gram,
gram_cho_factor=gram_cho_factor,
kernel_means=kernel_means,
)

13 changes: 13 additions & 0 deletions src/probnum/quad/solvers/acquisition_functions/__init__.py
@@ -0,0 +1,13 @@
"""Acquisition functions for Bayesian quadrature."""

from ._acquisition_function import AcquisitionFunction
from ._predictive_variance import WeightedPredictiveVariance

# Public classes and functions. Order is reflected in documentation.
__all__ = [
"AcquisitionFunction",
"WeightedPredictiveVariance",
]

# Set correct module paths. Corrects links and module paths in documentation.
WeightedPredictiveVariance.__module__ = "probnum.quad.solvers.acquisition_functions"
43 changes: 43 additions & 0 deletions src/probnum/quad/solvers/acquisition_functions/_acquisition_function.py
@@ -0,0 +1,43 @@
"""Abstract base class for BQ acquisition functions."""

from __future__ import annotations

import abc
from typing import Optional, Tuple

import numpy as np

from probnum.quad.solvers._bq_state import BQState


class AcquisitionFunction(abc.ABC):
"""An abstract class for an acquisition function for Bayesian quadrature."""

@property
@abc.abstractmethod
def has_gradients(self) -> bool:
"""Whether the acquisition function exposes gradients."""
raise NotImplementedError

@abc.abstractmethod
def __call__(
self, x: np.ndarray, bq_state: BQState
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Evaluates the acquisition function and optionally its gradients.

Parameters
----------
x
*shape=(batch_size, input_dim)* -- The nodes where the acquisition function
is being evaluated.
bq_state
State of the BQ belief.

Returns
-------
acquisition_values :
*shape=(batch_size, )* -- The acquisition values at nodes ``x``.
acquisition_gradients :
*shape=(batch_size, input_dim)* -- The corresponding gradients (optional).
"""
raise NotImplementedError
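A hypothetical subclass satisfying this interface could look as follows; ``UniformAcquisition`` is illustrative only and not part of the package.

.. code-block:: python

    from typing import Optional, Tuple

    import numpy as np

    from probnum.quad.solvers import BQState
    from probnum.quad.solvers.acquisition_functions import AcquisitionFunction


    class UniformAcquisition(AcquisitionFunction):
        """Scores every candidate node equally (i.e., random search)."""

        @property
        def has_gradients(self) -> bool:
            return False  # no gradients exposed

        def __call__(
            self, x: np.ndarray, bq_state: BQState
        ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
            # One constant acquisition value per candidate node, no gradients.
            return np.ones(x.shape[0]), None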
48 changes: 48 additions & 0 deletions src/probnum/quad/solvers/acquisition_functions/_predictive_variance.py
@@ -0,0 +1,48 @@
"""Uncertainty sampling for Bayesian Monte Carlo."""

from __future__ import annotations

from typing import Optional, Tuple

import numpy as np

from probnum.quad.solvers._bq_state import BQState
from probnum.quad.solvers.belief_updates import BQStandardBeliefUpdate

from ._acquisition_function import AcquisitionFunction

# pylint: disable=too-few-public-methods, fixme


class WeightedPredictiveVariance(AcquisitionFunction):
r"""The predictive variance acquisition function that yields uncertainty sampling.

The acquisition function is

.. math::
a(x) = \operatorname{Var}(f(x)) p(x)^2

where :math:`\operatorname{Var}(f(x))` is the predictive variance of the model and
:math:`p(x)` is the density of the integration measure :math:`\mu`.

"""

@property
def has_gradients(self) -> bool:
# Todo (#581): this needs to return True, once gradients are available
return False

def __call__(
self,
x: np.ndarray,
bq_state: BQState,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
predictive_variance = bq_state.kernel(x, x)
if bq_state.fun_evals.shape != (0,):
kXx = bq_state.kernel.matrix(bq_state.nodes, x)
regression_weights = BQStandardBeliefUpdate.gram_cho_solve(
bq_state.gram_cho_factor, kXx
)
predictive_variance -= np.sum(regression_weights * kXx, axis=0)
values = bq_state.scale_sq * predictive_variance * bq_state.measure(x) ** 2
return values, None
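Spelled out, the value computed in ``__call__`` above for a candidate :math:`x` is (with :math:`\hat{\sigma}^2` = ``bq_state.scale_sq``, :math:`K_\epsilon` the jittered Gram matrix of the current nodes :math:`X` whose Cholesky factor is stored in ``bq_state.gram_cho_factor``, and :math:`k_{Xx}` = ``kXx``):

.. math::
    a(x) = \hat{\sigma}^2 \left( k(x, x) - k_{Xx}^\top K_\epsilon^{-1} k_{Xx} \right) p(x)^2,

which reduces to :math:`\hat{\sigma}^2 k(x, x) p(x)^2` before any function evaluations have been collected.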
29 changes: 23 additions & 6 deletions src/probnum/quad/solvers/belief_updates/_belief_update.py
@@ -58,7 +58,7 @@ def __call__(
"""
raise NotImplementedError

def _compute_gram_cho_factor(self, gram: np.ndarray) -> np.ndarray:
def compute_gram_cho_factor(self, gram: np.ndarray) -> Tuple[np.ndarray, bool]:
"""Compute the Cholesky decomposition of a positive-definite Gram matrix for use
in scipy.linalg.cho_solve

@@ -68,19 +68,36 @@ def _compute_gram_cho_factor(self, gram: np.ndarray) -> np.ndarray:

Parameters
----------
gram :
gram
symmetric pos. def. kernel Gram matrix :math:`K`, shape (nevals, nevals)

Returns
-------
gram_cho_factor :
The upper triangular Cholesky decomposition of the Gram matrix. Other
parts of the matrix contain random data.
parts of the returned array contain arbitrary data. The second tuple element is
a boolean indicating whether the factor is lower triangular (always ``False``,
as required by ``scipy.linalg.cho_solve``).
"""
return cho_factor(gram + self.jitter * np.eye(gram.shape[0]))

# pylint: disable=no-self-use
def _gram_cho_solve(self, gram_cho_factor: np.ndarray, z: np.ndarray) -> np.ndarray:
@staticmethod
def gram_cho_solve(
gram_cho_factor: Tuple[np.ndarray, bool], z: np.ndarray
) -> np.ndarray:
"""Wrapper for scipy.linalg.cho_solve. Meant to be used for linear systems of
the gram matrix. Requires the solution of scipy.linalg.cho_factor as input."""
the Gram matrix. Requires the output of ``scipy.linalg.cho_factor`` as input.

Parameters
----------
gram_cho_factor
The return value of ``compute_gram_cho_factor``.
z
The right-hand side of the linear system; its leading dimension must match
the size of the Gram matrix.

Returns
-------
solution :
The solution ``x`` to the linear system ``gram x = z``.

"""
return cho_solve(gram_cho_factor, z)
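A standalone sketch of the ``cho_factor`` / ``cho_solve`` pattern used above (jitter value and matrices are illustrative):

.. code-block:: python

    import numpy as np
    from scipy.linalg import cho_factor, cho_solve

    jitter = 1e-8
    nodes = np.linspace(0.0, 1.0, 5)[:, None]
    gram = np.exp(-0.5 * (nodes - nodes.T) ** 2)  # SPD kernel Gram matrix
    gram_cho_factor = cho_factor(gram + jitter * np.eye(gram.shape[0]))

    z = np.ones(gram.shape[0])
    x = cho_solve(gram_cho_factor, z)  # solves gram @ x = z
    assert np.allclose(gram @ x, z, atol=1e-6)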
12 changes: 8 additions & 4 deletions src/probnum/quad/solvers/belief_updates/_standard_update.py
@@ -72,13 +72,13 @@ def __call__(
)

# Cholesky factorisation of the Gram matrix
gram_cho_factor = self._compute_gram_cho_factor(gram)
gram_cho_factor = self.compute_gram_cho_factor(gram)

# Estimate scaling parameter
new_scale_sq = self._estimate_scale(fun_evals, gram_cho_factor, bq_state)

# Integral mean and variance
weights = self._gram_cho_solve(gram_cho_factor, kernel_means)
weights = self.gram_cho_solve(gram_cho_factor, kernel_means)
integral_mean = weights @ fun_evals
initial_integral_variance = new_kernel_embedding.kernel_variance()
integral_variance = new_scale_sq * (
@@ -94,6 +94,7 @@ def __call__(
integral_belief=new_belief,
prev_state=bq_state,
gram=gram,
gram_cho_factor=gram_cho_factor,
kernel_means=kernel_means,
)

@@ -108,15 +109,18 @@ def _estimate_kernel(self, kernel: Kernel) -> Tuple[Kernel, bool]:
return new_kernel, kernel_was_updated

def _estimate_scale(
self, fun_evals: np.ndarray, gram_cho_factor: np.ndarray, bq_state: BQState
self,
fun_evals: np.ndarray,
gram_cho_factor: Tuple[np.ndarray, bool],
bq_state: BQState,
) -> FloatLike:
"""Estimate the scale parameter."""
if self.scale_estimation is None:
new_scale_sq = bq_state.scale_sq
elif self.scale_estimation == "mle":
new_scale_sq = (
fun_evals
@ self._gram_cho_solve(gram_cho_factor, fun_evals)
@ self.gram_cho_solve(gram_cho_factor, fun_evals)
/ fun_evals.shape[0]
)
else:
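Spelled out, the ``mle`` branch above computes (with :math:`f` = ``fun_evals``, :math:`K_\epsilon` the jittered Gram matrix behind ``gram_cho_factor``, and :math:`n` the number of evaluations):

.. math::
    \hat{\sigma}^2 = \frac{f^\top K_\epsilon^{-1} f}{n}.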
3 changes: 3 additions & 0 deletions src/probnum/quad/solvers/policies/__init__.py
@@ -1,5 +1,6 @@
"""Policies for Bayesian quadrature."""

from ._max_aquisition_policy import RandomMaxAcquisitionPolicy
from ._policy import Policy
from ._random_policy import RandomPolicy
from ._van_der_corput_policy import VanDerCorputPolicy
@@ -9,9 +10,11 @@
"Policy",
"RandomPolicy",
"VanDerCorputPolicy",
"RandomMaxAcquisitionPolicy",
]

# Set correct module paths. Corrects links and module paths in documentation.
Policy.__module__ = "probnum.quad.solvers.policies"
RandomPolicy.__module__ = "probnum.quad.solvers.policies"
VanDerCorputPolicy.__module__ = "probnum.quad.solvers.policies"
RandomMaxAcquisitionPolicy.__module__ = "probnum.quad.solvers.policies"