Skip to content

GreedyCover

sgptools.methods.GreedyCover

Bases: HexCover

Greedy sensing-location selection via GP posterior-variance.

The method selects points from a discrete candidate set to cover as many objective/environment points as possible under a single-measurement Gaussian Process (GP) variance reduction criterion.

Refer to the following paper for more details:
  • Jakkala et al., 2026. Informative Path Planning with Guaranteed Estimation Uncertainty.
Algorithm summary
  • Build a boolean coverage matrix.
  • Greedily select the candidate with the largest number of newly covered objective points.
  • Stop when the target coverage fraction is reached or the sensing budget is exhausted.
  • Optionally order the selected points via a TSP solver.
Notes
  • Current implementation assumes a single robot (num_robots == 1).
  • May return fewer than num_sensing points if the target is reached early.
  • Raises ValueError if the target coverage is not achievable from the candidate set.
Source code in sgptools/methods.py
class GreedyCover(HexCover):
    """Greedy sensing-location selection via GP posterior-variance.

    The method selects points from a discrete candidate set to cover as many
    objective/environment points as possible under a *single-measurement*
    Gaussian Process (GP) variance reduction criterion.

    Refer to the following paper for more details:
        - Jakkala et al., 2026. *Informative Path Planning with Guaranteed Estimation Uncertainty.*

    Algorithm summary:
        - Build a boolean coverage matrix.
        - Greedily select the candidate with the largest number of newly covered
           objective points.
        - Stop when the target coverage fraction is reached or the sensing budget
           is exhausted.
        - Optionally order the selected points via a TSP solver.

    Notes:
        - Current implementation assumes a single robot (num_robots == 1).
        - May return fewer than num_sensing points if the target is reached early.
        - Raises ValueError if the target coverage is not achievable from the
          candidate set.
    """

    def optimize(self,
                 post_var_threshold: float = 0.7,
                 target_fraction: float = 100,
                 return_fovs: bool = False,
                 slack_ratio: Optional[float] = None,
                 candidate_method: str = 'Hex',
                 X_warm_start: Optional[np.ndarray] = None,
                 **kwargs) -> np.ndarray:
        """Run greedy sensing-location selection via GP posterior-variance.

        This method constructs coverage maps using the GP posterior variance.

        Then greedily selects candidates that maximize the number of *newly*
        covered objective points until either:

        - target_fraction percent of objective points are covered, or

        - num_sensing points have been selected.

        Args:
            post_var_threshold (float):
                Posterior variance upper bound at objective points (same units as the
                kernel variance). Lower values demand stronger information gain.
            target_fraction (float):
                Desired percent coverage in [0, 100]; float values such as 95.0 are allowed.
            return_fovs (bool):
                If True, also return polygons summarizing each selected candidate’s
                covered region (convex hull of covered objective points, buffered).
            slack_ratio (float | None):
                Non-negative slack used to lower the post_var_threshold when generating
                the candidate set, generating extra candidates and improving the chance of
                reaching the target coverage. Defaults to 0.5 for `Hex` and 0.9 for `Grid`.
            candidate_method (str):
                Method used to generate the candidate set. Available options: `Hex` and `Grid`.
            X_warm_start (np.ndarray | None):
                Initial candidate locations to force inclusion.
            **kwargs (Any):
                Extra arguments forwarded to the TSP solver.

        Returns:
            np.ndarray | Tuple[np.ndarray, List[shapely.geometry.Polygon]]:
                X_sol:
                    Array shaped (num_robots, k, d) with k <= num_sensing selected points.
                (X_sol, fovs):
                    If return_fovs is True, also returns a list of shapely Polygons.
                Note: when nothing beyond the warm start is selected, a bare
                empty array is returned even if return_fovs is True.
        """
        # Normalize the warm-start set once so the `len()` checks below are
        # always valid (avoids the shared mutable-default-argument pitfall).
        if X_warm_start is None:
            X_warm_start = []

        if not hasattr(self, "coverages"):
            # Decrease the slack ratio until the target fraction is reachable:
            # a smaller slack ratio tightens the candidate-generation threshold
            # (Hex) or grid spacing (Grid), producing a denser candidate set.
            if slack_ratio is None:
                slack_ratio = 0.5 if candidate_method == 'Hex' else 0.9

            max_fraction = float("-inf")
            while max_fraction < target_fraction:
                max_fraction = self._compute_coverage_maps(
                    post_var_threshold,
                    target_fraction,
                    slack_ratio,
                    candidate_method,
                    X_warm_start,
                    **kwargs,
                )

                if max_fraction >= target_fraction or max_fraction < 0.:
                    break

                print("Failed to achieve target coverage. Retrying with decreased slack ratio...")
                slack_ratio -= 0.1
                if slack_ratio < 0.:
                    break

            if max_fraction < target_fraction or slack_ratio < 0.:
                raise ValueError(
                    f"Target coverage: {target_fraction:.2f}% is not achievable; "
                    f"maximum possible: {max_fraction:.2f}% with "
                    f"post_var_threshold: {post_var_threshold:.2f} and "
                    f"Final slack_ratio: {slack_ratio:.2f}."
                )

        # ---------------- Greedy loop ----------------
        n = len(self.X_candidates)
        selected = []
        selected_mask = np.zeros(n, dtype=bool)
        current_cover = np.zeros(self.X_objective.shape[0],
                                    dtype=bool)
        current_sum = 0

        # Warm-start locations (prepended to X_candidates by
        # _compute_coverage_maps) are forced into the selection first.
        if len(X_warm_start):
            selected = list(range(len(X_warm_start)))
            current_cover = np.any(self.coverages[selected], axis=0)
            current_sum = int(current_cover.sum())
            selected_mask[selected] = True

        while current_sum < self.target_sum and len(selected) < self.num_sensing:
            remaining = np.where(~selected_mask)[0]
            if remaining.size == 0:
                break

            gains = _compute_gains_numba(
                remaining.astype(np.int64),
                self.coverages,
                current_cover
            )

            best_pos = int(np.argmax(gains))
            best_gain = int(gains[best_pos])
            best_idx = int(remaining[best_pos])

            # No candidate adds any new coverage; further selection is futile.
            if best_gain <= 0:
                break

            current_cover |= self.coverages[best_idx]
            current_sum = int(current_cover.sum())

            selected_mask[best_idx] = True
            selected.append(best_idx)

            if current_sum >= self.target_sum:
                break

        # ---------------- Prepare output ----------------
        if len(selected) == 0:
            return np.zeros((1, 0, self.num_dim), dtype=self.X_objective.dtype)

        # Remove warm start locations (they were forced in, not newly chosen).
        if len(X_warm_start):
            selected = selected[len(X_warm_start):]
            if len(selected) == 0:
                # All coverage came from the warm-start points themselves;
                # avoid handing an empty array to the TSP solver.
                return np.zeros((1, 0, self.num_dim), dtype=self.X_objective.dtype)

        X_sol = self.X_candidates[selected]
        X_sol, _ = run_tsp(X_sol, **kwargs)
        X_sol = np.array(X_sol).reshape(self.num_robots, -1, self.num_dim)

        if return_fovs:
            return X_sol, self._get_fovs(self.coverages[selected])
        else:
            return X_sol

    def _get_grid(self, slack_ratio):
        """Generate a uniform grid candidate set.

        Spacing equals the minimum kernel lengthscale scaled by `slack_ratio`
        (smaller slack => denser grid). Points outside `pbounds` are removed
        when a boundary polygon was provided.

        Args:
            slack_ratio (float):
                Multiplier applied to the minimum lengthscale to get the spacing.

        Returns:
            np.ndarray: (m, 2) grid points inside the environment bounds.
        """
        # Grid spacing from the smallest lengthscale, scaled by the slack ratio
        distance = np.min(self.kernel.get_lengthscales(self.X_objective))
        distance *= slack_ratio

        # Number of grid points along each axis of the bounding box
        len_x = max(self.X_objective[:, 0])-min(self.X_objective[:, 0])
        len_y = max(self.X_objective[:, 1])-min(self.X_objective[:, 1])
        num_x = np.ceil(len_x/distance)
        num_y = np.ceil(len_y/distance)
        # A complex step makes np.mgrid interpret it as a point count (linspace-like)
        grid_x, grid_y = np.mgrid[min(self.X_objective[:, 0]):max(self.X_objective[:, 0]):complex(num_x),
                                  min(self.X_objective[:, 1]):max(self.X_objective[:, 1]):complex(num_y)]
        X_grid = np.stack([grid_x, grid_y], axis=-1)
        X_grid = X_grid.reshape(-1, 2).astype(self.X_objective.dtype)

        # Remove sensing locations outside the boundary polygon
        if self.pbounds is not None:
            points = shapely.points(X_grid)
            inside_idx = shapely.contains(self.pbounds, points)
            X_grid = X_grid[inside_idx]

        return X_grid

    def _compute_coverage_maps(self,
                               post_var_threshold,
                               target_fraction,
                               slack_ratio,
                               method: str = 'Hex',
                               X_warm_start: Optional[np.ndarray] = None,
                               **kwargs):
        """Build the candidate set and the boolean coverage matrix.

        Steps:
            1) Generate candidate points using HexCover with a tighter threshold
               or with the Grid approach.
            2) Compute:
                v_cand[i] = k(x_i, x_i)
                v_obj[j]  = k(x_j, x_j)
                K[i, j]   = k(x_i, x_j)
            3) Mark covered(i, j) true when (strict inequality):
                K[i, j]^2 > max(v_obj[j] - post_var_threshold, 0) * (v_cand[i] + σ_n^2)
            4) Compute the integer number of objective points required to meet
               target_fraction, and report the achievable coverage.

        Args:
            post_var_threshold (float):
                Posterior variance upper bound at objective points.
            target_fraction (float):
                Desired percent coverage in [0, 100].
            slack_ratio (float):
                Non-negative slack to adjust threshold during candidate generation.
            method (str):
                Method to generate the candidate set ('Hex' or 'Grid').
            X_warm_start (np.ndarray | None):
                Initial candidate locations to force inclusion.
            **kwargs (Any):
                Additional keyword arguments.

        Returns:
            float:
                Maximum achievable percent coverage from the candidate set.

        Raises:
            ValueError:
                If `method` is not one of 'Hex' or 'Grid'.
        """
        if X_warm_start is None:
            X_warm_start = []

        # ---------------- Candidate & environment sets ----------------
        X_objective = self.X_objective

        # Get candidates using HexCover or Grid
        if method == 'Hex':
            self.X_candidates = super().optimize(post_var_threshold=post_var_threshold*slack_ratio,
                                                 tsp=False,
                                                 **kwargs)[0]
        elif method == 'Grid':
            self.X_candidates = self._get_grid(slack_ratio)
        else:
            raise ValueError(
                    f"Invalid candidate set generation method: {method}..."
                    f"Available options: Hex and Grid"
                )

        if len(X_warm_start):
            self.X_candidates = np.vstack([X_warm_start, self.X_candidates])

        X_objective = np.asarray(X_objective)
        X_candidates = np.asarray(self.X_candidates,
                                  dtype=X_objective.dtype)

        # ---------------- Compute coverage maps ----------------
        candidate_vars = self.kernel.K_diag(X_candidates).numpy()
        objective_vars = self.kernel.K_diag(X_objective).numpy()
        fact_1 = np.maximum(objective_vars - post_var_threshold, 0.0)
        fact_2 = candidate_vars + self.noise_variance
        var_condition = np.outer(fact_2, fact_1)
        prior_covs = self.kernel(X_candidates, X_objective).numpy()
        # Candidate i covers objective j when a single measurement at i drives
        # the GP posterior variance at j below post_var_threshold.
        self.coverages = np.square(prior_covs) > var_condition
        # Free the large intermediates before the greedy loop runs
        del var_condition, prior_covs, fact_1, fact_2, candidate_vars, objective_vars

        self.target_sum = int(np.ceil(X_objective.shape[0] * target_fraction / 100.0))

        # Sanity check: percent of objective points covered by at least one candidate
        num_covered = int(np.count_nonzero(self.coverages.any(axis=0)))
        max_fraction = (100.0 * num_covered) / float(X_objective.shape[0])
        return max_fraction

    def _get_fovs(self, coverages, buffer: float = 0.5):
        """Convert coverage masks into polygonal “fields of view” (FoVs).

        For each selected candidate row in `coverages`, collect the objective points
        that are covered, compute their convex hull, and apply a geometric buffer
        (dilation). Candidates covering fewer than 3 points are skipped.

        Args:
            coverages (np.ndarray):
                Boolean array of shape (k, n_obj) where each row indicates which
                objective points are covered by one selected candidate.
            buffer (float):
                Buffer radius passed to Shapely's .buffer(...).

        Returns:
            List[shapely.geometry.Polygon]:
                Buffered convex hull polygon per candidate (when enough points exist).
        """
        fovs = []
        for cover in coverages:
            covered_points = self.X_objective[cover]
            # A convex hull needs at least 3 points to form a polygon
            if len(covered_points) > 2:
                fov = geometry.MultiPoint(covered_points).convex_hull
                fov = fov.buffer(buffer)
                fovs.append(fov)
        return fovs

__init__(num_sensing, X_objective, kernel, noise_variance, transform=None, num_robots=1, X_candidates=None, num_dim=None, height=None, width=None, pbounds=None, **kwargs)

Initialize the method.

Parameters:

Name Type Description Default
num_sensing int

Maximum number of sensing locations (not strictly enforced; the tiling determines the actual number of points).

required
X_objective ndarray

Environment points. Used only to infer the bounding rectangle (min/max in the first two dimensions) when height/width are not provided.

required
kernel Kernel

GP kernel (assumed to have a variance and lengthscales attribute, e.g., SquaredExponential).

required
noise_variance float

Observation noise variance.

required
transform Transform | None

Reserved for compatibility with other methods.

None
num_robots int

Must be 1. Multi-robot tilings are not supported.

1
X_candidates ndarray | None

Ignored. Present for API compatibility with other methods.

None
num_dim int | None

Dimensionality of points. Defaults to X_objective.shape[-1].

None
height float | None

Environment height in the y-direction. If None, inferred from X_objective as y_max - y_min.

None
width float | None

Environment width in the x-direction. If None, inferred from X_objective as x_max - x_min.

None
pbounds ndarray | None

Coordinates of the environment boundary polygon, used to ensure all sensing locations are inside the environment.

None
**kwargs Any

Ignored. Accepted for forward compatibility.

{}
Source code in sgptools/methods.py
def __init__(self,
             num_sensing: int,
             X_objective: np.ndarray,
             kernel: gpflow.kernels.Kernel,
             noise_variance: float,
             transform: Optional[Transform] = None,
             num_robots: int = 1,
             X_candidates: Optional[np.ndarray] = None,
             num_dim: Optional[int] = None,
             height: Optional[float] = None,
             width: Optional[float] = None,
             pbounds: Optional[np.ndarray] = None,
             **kwargs: Any):
    """Initialize the method.

    Args:
        num_sensing (int):
            Maximum number of sensing locations (not strictly enforced; the
            tiling determines the actual number of points).
        X_objective (np.ndarray):
            Environment points. Used only to infer the bounding rectangle
            (min/max in the first two dimensions) when `height`/`width` are
            not provided.
        kernel (gpflow.kernels.Kernel):
            GP kernel (assumed to have a `variance` and `lengthscales`
            attribute, e.g., SquaredExponential).
        noise_variance (float):
            Observation noise variance.
        transform (Transform | None):
            Reserved for compatibility with other methods.
        num_robots (int):
            Must be 1. Multi-robot tilings are not supported.
        X_candidates (np.ndarray | None):
            Ignored. Present for API compatibility with other methods.
        num_dim (int | None):
            Dimensionality of points. Defaults to `X_objective.shape[-1]`.
        height (float | None):
            Environment height in the y-direction. If None, inferred from
            `X_objective` as `y_max - y_min`.
        width (float | None):
            Environment width in the x-direction. If None, inferred from
            `X_objective` as `x_max - x_min`.
        pbounds (np.ndarray | None):
            Coordinates of the environment boundary polygon, used to ensure all 
            sensing locations are inside the environment.
        **kwargs (Any):
            Ignored. Accepted for forward compatibility.

    Raises:
        ValueError: If `X_objective` is not 2-D with at least 2 spatial columns.
    """
    super().__init__(num_sensing, X_objective, kernel, noise_variance,
                     transform, num_robots, X_candidates, num_dim)

    # The hex tiling and greedy selection below are single-robot only.
    assert num_robots == 1, "HexCover only supports num_robots = 1."

    self.kernel = kernel
    self.noise_variance = float(noise_variance)

    # Store environment points for dtype and potential debugging
    self.X_objective = np.asarray(X_objective)

    if self.X_objective.ndim != 2 or self.X_objective.shape[1] < 2:
        raise ValueError(
            "HexCover requires X_objective with at least 2 spatial dimensions."
        )

    # Bounding box of the environment in (x, y) from X_objective
    mins = self.X_objective[:, :2].min(axis=0)
    maxs = self.X_objective[:, :2].max(axis=0)
    default_extent = maxs - mins

    self.origin = mins  # shift from [0, W] x [0, H] to actual coords
    self.width = float(width) if width is not None else float(default_extent[0])
    self.height = float(height) if height is not None else float(default_extent[1])

    # Boundary polygon (if given) used later to drop out-of-bounds candidates
    if pbounds is not None:
        self.pbounds = geometry.Polygon(pbounds)
    else:
        self.pbounds = None

    # Extract scalar lengthscale and prior variance
    self._extract_kernel_scalars()

get_hyperparameters()

Return current kernel and noise variance as (kernel, noise_variance).

Returns:

Type Description
Tuple[Kernel, float]

Tuple[gpflow.kernels.Kernel, float]: A tuple containing the kernel instance and noise variance.

Source code in sgptools/methods.py
def get_hyperparameters(self) -> Tuple[gpflow.kernels.Kernel, float]:
    """Return copies of the current GP hyperparameters.

    Returns:
        Tuple[gpflow.kernels.Kernel, float]:
            The kernel (deep-copied so callers cannot mutate the internal
            instance) and the observation noise variance as a float.
    """
    kernel_copy = deepcopy(self.kernel)
    noise = float(self.noise_variance)
    return kernel_copy, noise

optimize(post_var_threshold=0.7, target_fraction=100, return_fovs=False, slack_ratio=None, candidate_method='Hex', X_warm_start=[], **kwargs)

Run greedy sensing-location selection via GP posterior-variance.

This method constructs coverage maps using the GP posterior variance.

Then greedily selects candidates that maximize the number of newly covered objective points until either:

  • target_fraction percent of objective points are covered, or

  • num_sensing points have been selected.

Parameters:

Name Type Description Default
post_var_threshold float

Posterior variance upper bound at objective points (same units as the kernel variance). Lower values demand stronger information gain.

0.7
target_fraction int

Desired percent coverage in [0, 100]. Float values such as 95.0 are also accepted.

100
return_fovs bool

If True, also return polygons summarizing each selected candidate’s covered region (convex hull of covered objective points, buffered).

False
slack_ratio float | None

Non-negative slack used to lower the post_var_threshold when generating the candidate set, generating extra candidates and improving the chance of reaching the target coverage.

None
candidate_method str

Method used to generate the candidate set. Available options: Hex and Grid.

'Hex'
X_warm_start ndarray

Initial candidate locations to force inclusion.

[]
**kwargs Any

Extra arguments forwarded to the TSP solver.

{}

Returns:

Type Description
ndarray

np.ndarray | Tuple[np.ndarray, List[shapely.geometry.Polygon]]: X_sol: Array shaped (num_robots, k, d) with k <= num_sensing selected points. (X_sol, fovs): If return_fovs is True, also returns a list of shapely Polygons.

Source code in sgptools/methods.py
def optimize(self,
             post_var_threshold: float = 0.7,
             target_fraction: float = 100,
             return_fovs: bool = False,
             slack_ratio: Optional[float] = None,
             candidate_method: str = 'Hex',
             X_warm_start: Optional[np.ndarray] = None,
             **kwargs) -> np.ndarray:
    """Run greedy sensing-location selection via GP posterior-variance.

    This method constructs coverage maps using the GP posterior variance.

    Then greedily selects candidates that maximize the number of *newly*
    covered objective points until either:

    - target_fraction percent of objective points are covered, or

    - num_sensing points have been selected.

    Args:
        post_var_threshold (float):
            Posterior variance upper bound at objective points (same units as the
            kernel variance). Lower values demand stronger information gain.
        target_fraction (float):
            Desired percent coverage in [0, 100]; float values such as 95.0 are allowed.
        return_fovs (bool):
            If True, also return polygons summarizing each selected candidate’s
            covered region (convex hull of covered objective points, buffered).
        slack_ratio (float | None):
            Non-negative slack used to lower the post_var_threshold when generating
            the candidate set, generating extra candidates and improving the chance of
            reaching the target coverage. Defaults to 0.5 for `Hex` and 0.9 for `Grid`.
        candidate_method (str):
            Method used to generate the candidate set. Available options: `Hex` and `Grid`.
        X_warm_start (np.ndarray | None):
            Initial candidate locations to force inclusion.
        **kwargs (Any):
            Extra arguments forwarded to the TSP solver.

    Returns:
        np.ndarray | Tuple[np.ndarray, List[shapely.geometry.Polygon]]:
            X_sol:
                Array shaped (num_robots, k, d) with k <= num_sensing selected points.
            (X_sol, fovs):
                If return_fovs is True, also returns a list of shapely Polygons.
            Note: when nothing beyond the warm start is selected, a bare
            empty array is returned even if return_fovs is True.
    """
    # Normalize the warm-start set once so the `len()` checks below are
    # always valid (avoids the shared mutable-default-argument pitfall).
    if X_warm_start is None:
        X_warm_start = []

    if not hasattr(self, "coverages"):
        # Decrease the slack ratio until the target fraction is reachable:
        # a smaller slack ratio tightens the candidate-generation threshold
        # (Hex) or grid spacing (Grid), producing a denser candidate set.
        if slack_ratio is None:
            slack_ratio = 0.5 if candidate_method == 'Hex' else 0.9

        max_fraction = float("-inf")
        while max_fraction < target_fraction:
            max_fraction = self._compute_coverage_maps(
                post_var_threshold,
                target_fraction,
                slack_ratio,
                candidate_method,
                X_warm_start,
                **kwargs,
            )

            if max_fraction >= target_fraction or max_fraction < 0.:
                break

            print("Failed to achieve target coverage. Retrying with decreased slack ratio...")
            slack_ratio -= 0.1
            if slack_ratio < 0.:
                break

        if max_fraction < target_fraction or slack_ratio < 0.:
            raise ValueError(
                f"Target coverage: {target_fraction:.2f}% is not achievable; "
                f"maximum possible: {max_fraction:.2f}% with "
                f"post_var_threshold: {post_var_threshold:.2f} and "
                f"Final slack_ratio: {slack_ratio:.2f}."
            )

    # ---------------- Greedy loop ----------------
    n = len(self.X_candidates)
    selected = []
    selected_mask = np.zeros(n, dtype=bool)
    current_cover = np.zeros(self.X_objective.shape[0],
                                dtype=bool)
    current_sum = 0

    # Warm-start locations (prepended to X_candidates by
    # _compute_coverage_maps) are forced into the selection first.
    if len(X_warm_start):
        selected = list(range(len(X_warm_start)))
        current_cover = np.any(self.coverages[selected], axis=0)
        current_sum = int(current_cover.sum())
        selected_mask[selected] = True

    while current_sum < self.target_sum and len(selected) < self.num_sensing:
        remaining = np.where(~selected_mask)[0]
        if remaining.size == 0:
            break

        gains = _compute_gains_numba(
            remaining.astype(np.int64),
            self.coverages,
            current_cover
        )

        best_pos = int(np.argmax(gains))
        best_gain = int(gains[best_pos])
        best_idx = int(remaining[best_pos])

        # No candidate adds any new coverage; further selection is futile.
        if best_gain <= 0:
            break

        current_cover |= self.coverages[best_idx]
        current_sum = int(current_cover.sum())

        selected_mask[best_idx] = True
        selected.append(best_idx)

        if current_sum >= self.target_sum:
            break

    # ---------------- Prepare output ----------------
    if len(selected) == 0:
        return np.zeros((1, 0, self.num_dim), dtype=self.X_objective.dtype)

    # Remove warm start locations (they were forced in, not newly chosen).
    if len(X_warm_start):
        selected = selected[len(X_warm_start):]
        if len(selected) == 0:
            # All coverage came from the warm-start points themselves;
            # avoid handing an empty array to the TSP solver.
            return np.zeros((1, 0, self.num_dim), dtype=self.X_objective.dtype)

    X_sol = self.X_candidates[selected]
    X_sol, _ = run_tsp(X_sol, **kwargs)
    X_sol = np.array(X_sol).reshape(self.num_robots, -1, self.num_dim)

    if return_fovs:
        return X_sol, self._get_fovs(self.coverages[selected])
    else:
        return X_sol

update(kernel, noise_variance)

Update kernel and noise variance hyperparameters.

Parameters:

Name Type Description Default
kernel Kernel

New GPflow kernel instance.

required
noise_variance float

New observation noise variance.

required
Source code in sgptools/methods.py
def update(self,
           kernel: gpflow.kernels.Kernel,
           noise_variance: float) -> None:
    """Replace the GP hyperparameters and refresh derived scalars.

    Args:
        kernel (gpflow.kernels.Kernel):
            New GPflow kernel instance.
        noise_variance (float):
            New observation noise variance.
    """
    self.kernel, self.noise_variance = kernel, float(noise_variance)
    # Re-derive the cached scalar lengthscale/variance from the new kernel.
    self._extract_kernel_scalars()