diff --git a/autofit/example/analysis.py b/autofit/example/analysis.py index 83db61645..581415228 100644 --- a/autofit/example/analysis.py +++ b/autofit/example/analysis.py @@ -132,7 +132,7 @@ def save_attributes(self, paths: af.DirectoryPaths): Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. """ paths.save_json(name="data", object_dict=self.data.tolist(), prefix="dataset") diff --git a/autofit/example/visualize.py b/autofit/example/visualize.py index a66f49824..7493021d6 100644 --- a/autofit/example/visualize.py +++ b/autofit/example/visualize.py @@ -33,7 +33,7 @@ def visualize_before_fit( analysis The analysis class used to perform the model-fit whose quantities are being visualized. paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. model The model which is fitted to the data, which may be used to customize the visualization. @@ -84,7 +84,7 @@ def visualize( analysis The analysis class used to perform the model-fit whose quantities are being visualized. paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. 
instance An instance of the model that is being fitted to the data by this analysis (whose parameters have been set @@ -163,7 +163,7 @@ def visualize_before_fit_combined( analyses A list of the analysis classes used to perform the model-fit whose quantities are being visualized. paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. model The model which is fitted to the data, which may be used to customize the visualization. @@ -202,7 +202,7 @@ def visualize_combined( analyses A list of the analysis classes used to perform the model-fit whose quantities are being visualized. paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. model The model which is fitted to the data, which may be used to customize the visualization. diff --git a/autofit/mapper/prior/abstract.py b/autofit/mapper/prior/abstract.py index a1e3c30db..174bb4851 100644 --- a/autofit/mapper/prior/abstract.py +++ b/autofit/mapper/prior/abstract.py @@ -95,16 +95,11 @@ def new(self): new.id = next(self._ids) return new + @abstractmethod def with_limits(self, lower_limit: float, upper_limit: float) -> "Prior": """ Create a new instance of the same prior class with the passed limits. 
""" - new = self.__class__( - lower_limit=max(lower_limit, self.lower_limit), - upper_limit=min(upper_limit, self.upper_limit), - ) - new.message = self.message - return new @property def factor(self): diff --git a/autofit/mapper/prior/log_gaussian.py b/autofit/mapper/prior/log_gaussian.py index d13f9d203..a02d77e7b 100644 --- a/autofit/mapper/prior/log_gaussian.py +++ b/autofit/mapper/prior/log_gaussian.py @@ -71,6 +71,37 @@ def __init__( id_=id_, ) + @classmethod + def with_limits(cls, lower_limit: float, upper_limit: float) -> "LogGaussianPrior": + """ + Create a new gaussian prior centred between two limits + with sigma distance between this limits. + + Note that these limits are not strict so exceptions will not + be raised for values outside of the limits. + + This function is typically used in prior passing, where the + result of a model-fit are used to create new Gaussian priors + centred on the previously estimated median PDF model. + + Parameters + ---------- + lower_limit + The lower limit of the new Gaussian prior. + upper_limit + The upper limit of the new Gaussian Prior. 
+ + Returns + ------- + A new GaussianPrior + """ + return cls( + mean=(lower_limit + upper_limit) / 2, + sigma=upper_limit - lower_limit, + lower_limit=lower_limit, + upper_limit=upper_limit, + ) + def _new_for_base_message(self, message): """ Create a new instance of this wrapper but change the parameters used diff --git a/autofit/mapper/prior/uniform.py b/autofit/mapper/prior/uniform.py index 3aa799396..3cea04a90 100644 --- a/autofit/mapper/prior/uniform.py +++ b/autofit/mapper/prior/uniform.py @@ -64,6 +64,16 @@ def __init__( def tree_flatten(self): return (self.lower_limit, self.upper_limit), (self.id,) + def with_limits( + self, + lower_limit: float, + upper_limit: float, + ) -> "Prior": + return UniformPrior( + lower_limit=lower_limit, + upper_limit=upper_limit, + ) + def logpdf(self, x): # TODO: handle x as a numpy array if x == self.lower_limit: @@ -97,9 +107,9 @@ def value_for(self, unit: float, ignore_prior_limits: bool = False) -> float: physical_value = prior.value_for(unit=0.2) """ - return float(round( - super().value_for(unit, ignore_prior_limits=ignore_prior_limits), 14 - )) + return float( + round(super().value_for(unit, ignore_prior_limits=ignore_prior_limits), 14) + ) def log_prior_from_value(self, value): """ diff --git a/autofit/non_linear/analysis/visualize.py b/autofit/non_linear/analysis/visualize.py index 7b0e74313..d0000c28a 100644 --- a/autofit/non_linear/analysis/visualize.py +++ b/autofit/non_linear/analysis/visualize.py @@ -32,7 +32,7 @@ def should_visualize( Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization and the pickled objects used by the aggregator output by this function. 
Returns diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 4b8e672e5..ad83f2065 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -1,7 +1,7 @@ -import csv import logging import os from copy import copy +import numpy as np from pathlib import Path from typing import List, Generator, Callable, ClassVar, Optional, Union, Tuple @@ -9,12 +9,11 @@ from autofit.mapper.model import ModelInstance from autofit.mapper.prior_model.abstract import AbstractPriorModel from autofit.non_linear.grid.grid_search import make_lists, Sequential -from autofit.non_linear.grid.sensitivity.job import Job +from autofit.non_linear.grid.sensitivity.job import Job, MaskedJobResult from autofit.non_linear.grid.sensitivity.job import JobResult from autofit.non_linear.grid.sensitivity.result import SensitivityResult from autofit.non_linear.parallel import Process from autofit.text.formatter import write_table -from autofit.text.text_util import padding class Sensitivity: @@ -30,6 +29,7 @@ def __init__( job_cls: ClassVar = Job, perturb_model_prior_func: Optional[Callable] = None, number_of_steps: Union[Tuple[int, ...], int] = 4, + mask: Optional[List[bool]] = None, number_of_cores: int = 2, limit_scale: int = 1, ): @@ -65,6 +65,10 @@ def __init__( The number of steps for each dimension of the sensitivity grid. If input as a float the dimensions are all that value. If input as a tuple of length the number of dimensions, each tuple value is the number of steps in that dimension. + mask + A mask to apply to the sensitivity grid, such that all `True` values are not included in the sensitivity + mapping. This is useful for removing regions of the sensitivity grid that are expected to have no + sensitivity, for example because they have no signal. number_of_cores How many cores does this computer have? 
limit_scale @@ -94,6 +98,22 @@ def __init__( self.job_cls = job_cls self.number_of_steps = number_of_steps + self.mask = None + + if mask is not None: + self.mask = np.asarray(mask) + if self.shape != self.mask.shape: + raise ValueError( + f""" + The mask of the Sensitivity object must have the same shape as the sensitivity grid. + + For your inputs, the shape of each are as follows: + + Sensitivity Grid: {self.shape} + Mask: {self.mask.shape} + """ + ) + self.number_of_cores = number_of_cores self.limit_scale = limit_scale @@ -117,9 +137,24 @@ def run(self) -> SensitivityResult: process_class = Process if self.number_of_cores > 1 else Sequential - results = list() + results = [] + jobs = [] + + for number in range(len(self._perturb_instances)): + if self._should_bypass(number=number): + model = self.model.copy() + model.perturb = self._perturb_models[number] + results.append( + MaskedJobResult( + number=number, + model=model, + ) + ) + else: + jobs.append(self._make_job(number)) + for result in process_class.run_jobs( - self._make_jobs(), number_of_cores=self.number_of_cores + jobs, number_of_cores=self.number_of_cores ): if isinstance(result, Exception): raise result @@ -149,6 +184,7 @@ def run(self) -> SensitivityResult: result.perturb_result.samples_summary for result in results ], shape=self.shape, + path_values=self.path_values, ) self.paths.save_json("result", to_dict(sensitivity_result)) @@ -177,6 +213,21 @@ def shape(self) -> Tuple[int, ...]: self.number_of_steps for _ in range(self.perturb_model.prior_count) ) + def shape_index_from_number(self, number: int) -> Tuple[int, ...]: + """ + Returns the index of the sensitivity grid from a number. + + Parameters + ---------- + number + The number of the sensitivity grid. + + Returns + ------- + The index of the sensitivity grid. 
+ """ + return np.unravel_index(number, self.shape) + @property def step_size(self) -> Union[float, Tuple]: """ @@ -204,6 +255,17 @@ def _lists(self) -> List[List[float]]: """ return make_lists(self.perturb_model.prior_count, step_size=self.step_size) + @property + def path_values(self): + paths = [ + self.perturb_model.path_for_prior(prior) + for prior in self.perturb_model.priors_ordered_by_id + ] + + return { + path: list(values) for path, *values in zip(paths, *self._physical_values) + } + @property def _physical_values(self) -> List[List[float]]: """ @@ -228,31 +290,36 @@ def _headers(self) -> Generator[str, None, None]: yield path @property - def _labels(self) -> Generator[str, None, None]: + def _labels(self) -> List[str]: """ One label for each perturbation, used to distinguish fits for each perturbation by placing them in separate directories. """ + labels = [] for list_ in self._lists: strings = list() for value, prior_tuple in zip(list_, self.perturb_model.prior_tuples): path, prior = prior_tuple value = prior.value_for(value) strings.append(f"{path}_{value}") - yield "_".join(strings) + labels.append("_".join(strings)) + + return labels @property - def _perturb_instances(self) -> Generator[ModelInstance, None, None]: + def _perturb_instances(self) -> List[ModelInstance]: """ A list of instances each of which defines a perturbation to be applied to the image. """ - for list_ in self._lists: - yield self.perturb_model.instance_from_unit_vector(list_) + + return [ + self.perturb_model.instance_from_unit_vector(list_) for list_ in self._lists + ] @property - def _perturb_models(self) -> Generator[AbstractPriorModel, None, None]: + def _perturb_models(self) -> List[AbstractPriorModel]: """ A list of models representing a perturbation at each grid square. 
@@ -266,6 +333,8 @@ def _perturb_models(self) -> Generator[AbstractPriorModel, None, None]: step_sizes = (self.step_size,) * self.perturb_model.prior_count half_steps = [self.limit_scale * step_size / 2 for step_size in step_sizes] + + perturb_models = [] for list_ in self._lists: limits = [ ( @@ -278,38 +347,48 @@ def _perturb_models(self) -> Generator[AbstractPriorModel, None, None]: half_steps, ) ] - yield self.perturb_model.with_limits(limits) + perturb_models.append(self.perturb_model.with_limits(limits)) + return perturb_models + + def _should_bypass(self, number: int) -> bool: + shape_index = self.shape_index_from_number(number=number) + return self.mask is not None and np.asarray(self.mask)[shape_index] def _make_jobs(self) -> Generator[Job, None, None]: + for number, _ in enumerate(self._perturb_instances): + yield self._make_job(number) + + def _make_job(self, number) -> Job: """ Create a list of jobs to be run on separate processes. Each job fits a perturb image with the original model and a model which includes a perturbation.
""" - for number, (perturb_instance, perturb_model, label) in enumerate( - zip(self._perturb_instances, self._perturb_models, self._labels) - ): - if self.perturb_model_prior_func is not None: - perturb_model = self.perturb_model_prior_func( - perturb_instance=perturb_instance, perturb_model=perturb_model - ) + perturb_instance = self._perturb_instances[number] + perturb_model = self._perturb_models[number] + label = self._labels[number] - simulate_instance = copy(self.instance) - simulate_instance.perturb = perturb_instance - - paths = self.paths.for_sub_analysis( - label, + if self.perturb_model_prior_func is not None: + perturb_model = self.perturb_model_prior_func( + perturb_instance=perturb_instance, perturb_model=perturb_model ) - yield self.job_cls( - simulate_instance=simulate_instance, - model=self.model, - perturb_model=perturb_model, - base_instance=self.instance, - simulate_cls=self.simulate_cls, - base_fit_cls=self.base_fit_cls, - perturb_fit_cls=self.perturb_fit_cls, - paths=paths, - number=number, - ) + simulate_instance = copy(self.instance) + simulate_instance.perturb = perturb_instance + + paths = self.paths.for_sub_analysis( + label, + ) + + return self.job_cls( + simulate_instance=simulate_instance, + model=self.model, + perturb_model=perturb_model, + base_instance=self.instance, + simulate_cls=self.simulate_cls, + base_fit_cls=self.base_fit_cls, + perturb_fit_cls=self.perturb_fit_cls, + paths=paths, + number=number, + ) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index c5089eb0f..2be43c154 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -35,7 +35,10 @@ def log_evidence_increase(self) -> Optional[float]: if hasattr(self.result.samples, "log_evidence"): if self.result.samples.log_evidence is not None: - return float(self.perturb_result.samples.log_evidence - self.result.samples.log_evidence) + return float( + 
self.perturb_result.samples.log_evidence + - self.result.samples.log_evidence + ) @property def log_likelihood_increase(self) -> Optional[float]: @@ -48,6 +51,39 @@ def log_likelihood_increase(self) -> Optional[float]: return float(self.perturb_result.log_likelihood - self.result.log_likelihood) +class MaskedJobResult(AbstractJobResult): + """ + A placeholder result for a job that has been masked out. + """ + + def __init__(self, number, model): + super().__init__(number) + self.model = model + + @property + def result(self): + return self + + @property + def perturb_result(self): + return self + + def __getattr__(self, item): + return None + + @property + def samples_summary(self): + return self + + @property + def log_evidence(self): + return 0.0 + + @property + def log_likelihood(self): + return 0.0 + + class Job(AbstractJob): _number = count() @@ -111,6 +147,7 @@ def perform(self) -> JobResult: model=self.model, dataset=dataset, paths=self.paths.for_sub_analysis("[base]"), + instance=self.simulate_instance, ) perturb_model = copy(self.model) @@ -120,6 +157,7 @@ def perform(self) -> JobResult: model=perturb_model, dataset=dataset, paths=self.paths.for_sub_analysis("[perturb]"), + instance=self.simulate_instance, ) return JobResult( diff --git a/autofit/non_linear/grid/sensitivity/result.py b/autofit/non_linear/grid/sensitivity/result.py index 346909bbd..8e5fd4839 100644 --- a/autofit/non_linear/grid/sensitivity/result.py +++ b/autofit/non_linear/grid/sensitivity/result.py @@ -1,4 +1,4 @@ -from typing import List, Tuple, Union +from typing import List, Tuple, Union, Dict from autofit.non_linear.grid.grid_list import GridList, as_grid_list from autofit.non_linear.grid.grid_search.result import AbstractGridSearchResult @@ -12,22 +12,22 @@ def __init__( samples: List[SamplesInterface], perturb_samples: List[SamplesInterface], shape: Tuple[int, ...], + path_values: Dict[Tuple[str, ...], List[float]], ): """ The result of a sensitivity mapping Parameters ---------- - 
results - The results of each sensitivity job. - physical_values - A list of lists of values representing the physical values of the sensitivity grid values. shape The shape of the sensitivity mapping grid. + path_values + A list of tuples of the path to the grid priors and the physical values themselves. """ super().__init__(GridList(samples, shape)) self.perturb_samples = GridList(perturb_samples, shape) self.shape = shape + self.path_values = path_values def perturbed_physical_centres_list_from( self, path: Union[str, Tuple[str, ...]] @@ -40,10 +40,9 @@ def perturbed_physical_centres_list_from( path The path to the physical centres in the samples """ - return self._physical_centres_lists_from( - self.perturb_samples, - path, - ) + if isinstance(path, str): + path = tuple(path.split(".")) + return self.path_values[path] def __getitem__(self, item): return self.samples[item] diff --git a/autofit/non_linear/plot/nest_plotters.py b/autofit/non_linear/plot/nest_plotters.py index 64d4cd43a..2d4c470cb 100644 --- a/autofit/non_linear/plot/nest_plotters.py +++ b/autofit/non_linear/plot/nest_plotters.py @@ -1,5 +1,3 @@ -from anesthetic.samples import NestedSamples -from anesthetic import make_2d_axes from functools import wraps import numpy as np import warnings @@ -59,6 +57,8 @@ def corner_anesthetic(self, **kwargs): config_dict = conf.instance["visualize"]["plots_settings"]["corner_anesthetic"] + from anesthetic.samples import NestedSamples + from anesthetic import make_2d_axes import matplotlib.pylab as pylab params = {'font.size' : int(config_dict["fontsize"])} diff --git a/docs/cookbooks/analysis.rst b/docs/cookbooks/analysis.rst index 8b766c3d6..de6ce6d0d 100644 --- a/docs/cookbooks/analysis.rst +++ b/docs/cookbooks/analysis.rst @@ -641,7 +641,7 @@ These files can then also be loaded via the database, as described in the databa Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. 
where + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. """ @@ -674,7 +674,7 @@ These files can then also be loaded via the database, as described in the databa Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where the + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization and the pickled objects used by the aggregator output by this function. result diff --git a/docs/cookbooks/result.rst b/docs/cookbooks/result.rst index ccbb3f56e..7102e759f 100644 --- a/docs/cookbooks/result.rst +++ b/docs/cookbooks/result.rst @@ -553,7 +553,7 @@ as 1D numpy arrays, are converted to a suitable dictionary output format. This u Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. """ from autoconf.dictable import to_dict @@ -575,7 +575,7 @@ as 1D numpy arrays, are converted to a suitable dictionary output format. This u Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization and the pickled objects used by the aggregator output by this function. result The result of a model fit, including the non-linear search, samples and maximum likelihood model. 
diff --git a/test_autofit/non_linear/grid/test_sensitivity/conftest.py b/test_autofit/non_linear/grid/test_sensitivity/conftest.py index f808ffc39..715e9211f 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/conftest.py +++ b/test_autofit/non_linear/grid/test_sensitivity/conftest.py @@ -38,7 +38,7 @@ class BaseFit: def __init__(self, analysis_cls): self.analysis_cls = analysis_cls - def __call__(self, dataset, model, paths): + def __call__(self, dataset, model, paths, instance): search = af.m.MockSearch( return_sensitivity_results=True, samples_summary=MockSamplesSummary(model=model), @@ -53,7 +53,7 @@ class PerturbFit: def __init__(self, analysis_cls): self.analysis_cls = analysis_cls - def __call__(self, dataset, model, paths): + def __call__(self, dataset, model, paths, instance): search = af.m.MockSearch( return_sensitivity_results=True, samples_summary=MockSamplesSummary(model=model), @@ -88,6 +88,37 @@ def make_sensitivity( ) +@pytest.fixture(name="masked_sensitivity") +def make_masked_sensitivity( + perturb_model, +): + # noinspection PyTypeChecker + instance = af.ModelInstance() + instance.gaussian = af.Gaussian() + return s.Sensitivity( + simulation_instance=instance, + base_model=af.Collection(gaussian=af.Model(af.Gaussian)), + perturb_model=perturb_model, + simulate_cls=Simulate(), + base_fit_cls=BaseFit(Analysis), + perturb_fit_cls=PerturbFit(Analysis), + paths=af.DirectoryPaths(), + number_of_steps=2, + mask=np.array( + [ + [ + [True, True], + [True, True], + ], + [ + [True, True], + [True, True], + ], + ] + ), + ) + + @pytest.fixture(name="job") def make_job( perturb_model, diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py b/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py index 4d1330d1c..511bada9a 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py @@ -122,11 +122,6 @@ def 
test_prior_with_limits(self): assert prior.lower_limit == 3 assert prior.upper_limit == 5 - def test_existing_limits(self): - prior = af.UniformPrior(2, 4).with_limits(3, 5) - assert prior.lower_limit == 3 - assert prior.upper_limit == 4 - @pytest.fixture(name="tuple_sensitivity") def make_tuple_sensitivity(sensitivity): diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py new file mode 100644 index 000000000..bf9c33314 --- /dev/null +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -0,0 +1,71 @@ +from math import prod + +import pytest + +import autofit as af + + +@pytest.fixture(name="masked_result") +def make_masked_result(masked_sensitivity): + return masked_sensitivity.run() + + +def test_result_size(masked_sensitivity, masked_result): + number_elements = prod(masked_sensitivity.shape) + assert len(masked_result.samples) == number_elements + + +def test_sample(masked_result): + sample = masked_result.samples[0] + assert sample.model is not None + assert sample.model.perturb is not None + assert sample.log_evidence == 0.0 + assert sample.log_likelihood == 0.0 + + +@pytest.mark.parametrize( + "lower, upper, mean", + [ + (0.0, 1.0, 0.5), + (-1.0, 1.0, 0.0), + (-1.0, 0.0, -0.5), + (0.5, 1.0, 0.75), + ], +) +def test_mean_uniform_prior( + lower, + upper, + mean, +): + prior = af.UniformPrior( + lower_limit=0.0, + upper_limit=1.0, + ) + assert ( + prior.with_limits( + lower, + upper, + ).mean + == mean + ) + + +def test_path_value_dicts(masked_sensitivity): + assert masked_sensitivity.path_values == { + ("centre",): [0.25, 0.25, 0.25, 0.25, 0.75, 0.75, 0.75, 0.75], + ("normalization",): [0.25, 0.25, 0.75, 0.75, 0.25, 0.25, 0.75, 0.75], + ("sigma",): [0.25, 0.75, 0.25, 0.75, 0.25, 0.75, 0.25, 0.75], + } + + +def test_perturbed_physical_centres_list_from(masked_result): + assert 
masked_result.perturbed_physical_centres_list_from("centre") == [ + 0.25, + 0.25, + 0.25, + 0.25, + 0.75, + 0.75, + 0.75, + 0.75, + ] diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_results.py b/test_autofit/non_linear/grid/test_sensitivity/test_results.py index b531d2aa2..655794eba 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_results.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_results.py @@ -48,6 +48,9 @@ def make_sensitivity_result(job_result): samples=[job_result.result.samples.summary()], perturb_samples=[job_result.perturb_result.samples.summary()], shape=(1,), + path_values={ + ("centre",): [0.5], + }, )