Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions autolens/analysis/positions.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,10 +224,12 @@ def log_likelihood_penalty_base_from(
residual_map=residual_map, noise_map=dataset.noise_map
)

chi_squared = aa.util.fit.chi_squared_from(chi_squared_map=chi_squared_map)
chi_squared = aa.util.fit.chi_squared_from(
chi_squared_map=chi_squared_map.array
)

noise_normalization = aa.util.fit.noise_normalization_from(
noise_map=dataset.noise_map
noise_map=dataset.noise_map.array
)

else:
Expand Down
1 change: 1 addition & 0 deletions autolens/imaging/fit_imaging.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,7 @@ def tracer_to_inversion(self) -> TracerToInversion:
noise_map=self.noise_map,
grids=self.grids,
psf=self.dataset.psf,
convolver=self.dataset.convolver,
w_tilde=self.w_tilde,
)

Expand Down
1 change: 1 addition & 0 deletions autolens/lens/to_inversion.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,7 @@ def lp_linear_func_list_galaxy_dict(
noise_map=self.dataset.noise_map,
grids=grids,
psf=self.psf,
convolver=self.dataset.convolver,
transformer=self.transformer,
w_tilde=self.dataset.w_tilde,
)
Expand Down
7 changes: 3 additions & 4 deletions autolens/lens/tracer.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
from abc import ABC
import numpy as np
from functools import wraps
from scipy.interpolate import griddata
from typing import Dict, List, Optional, Type, Union

Expand Down Expand Up @@ -549,9 +548,9 @@ def image_2d_via_input_plane_image_from(
)[plane_index]

image = griddata(
points=plane_grid,
values=plane_image,
xi=traced_grid.over_sampled,
points=plane_grid.array,
values=plane_image.array,
xi=traced_grid.over_sampled.array,
fill_value=0.0,
method="linear",
)
Expand Down
6 changes: 3 additions & 3 deletions autolens/point/fit/positions/source/separations.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from autoarray.numpy_wrapper import numpy as npw
import jax.numpy as jnp
import numpy as np
from typing import Optional

Expand Down Expand Up @@ -126,8 +126,8 @@ def noise_normalization(self) -> float:
"""
Returns the normalization of the noise-map, which is the sum of the noise-map values squared.
"""
return npw.sum(
npw.log(
return jnp.sum(
jnp.log(
2
* np.pi
* (self.magnifications_at_positions**-2.0 * self.noise_map**2.0)
Expand Down
39 changes: 18 additions & 21 deletions autolens/point/solver/point_solver.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,7 @@
import logging
from typing import Tuple, Optional

from autoarray.numpy_wrapper import np

import autoarray as aa
from autoarray.numpy_wrapper import use_jax
from autoarray.structures.triangles.shape import Point

from autofit.jax_wrapper import jit, register_pytree_node_class
Expand Down Expand Up @@ -56,23 +53,23 @@ def solve(
filtered_means = self._filter_low_magnification(
tracer=tracer, points=kept_triangles.means
)
if use_jax:
return aa.Grid2DIrregular([pair for pair in filtered_means])

filtered_means = [
pair for pair in filtered_means if not np.any(np.isnan(pair)).all()
]

difference = len(kept_triangles.means) - len(filtered_means)
if difference > 0:
logger.debug(
f"Filtered one multiple-image with magnification below threshold."
)
elif difference > 1:
logger.warning(
f"Filtered {difference} multiple-images with magnification below threshold."
)
return aa.Grid2DIrregular([pair for pair in filtered_means])

return aa.Grid2DIrregular(
[pair for pair in filtered_means if not np.isnan(pair).all()]
)
# filtered_means = [
# pair for pair in filtered_means if not np.any(np.isnan(pair)).all()
# ]
#
# difference = len(kept_triangles.means) - len(filtered_means)
# if difference > 0:
# logger.debug(
# f"Filtered one multiple-image with magnification below threshold."
# )
# elif difference > 1:
# logger.warning(
# f"Filtered {difference} multiple-images with magnification below threshold."
# )
#
# return aa.Grid2DIrregular(
# [pair for pair in filtered_means if not np.isnan(pair).all()]
# )
28 changes: 9 additions & 19 deletions autolens/point/solver/shape_solver.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import jax.numpy as jnp
from jax import jit
import logging
import math

Expand All @@ -6,23 +8,11 @@
import autoarray as aa

from autoarray.structures.triangles.shape import Shape
from autofit.jax_wrapper import jit, use_jax, numpy as np, register_pytree_node_class

try:
if use_jax:
from autoarray.structures.triangles.coordinate_array.jax_coordinate_array import (
CoordinateArrayTriangles,
)
else:
from autoarray.structures.triangles.coordinate_array.coordinate_array import (
CoordinateArrayTriangles,
)

except ImportError:
from autoarray.structures.triangles.coordinate_array.coordinate_array import (
CoordinateArrayTriangles,
)
from autofit.jax_wrapper import register_pytree_node_class

from autoarray.structures.triangles.coordinate_array.jax_coordinate_array import (
CoordinateArrayTriangles,
)
from autoarray.structures.triangles.abstract import AbstractTriangles

from autogalaxy import OperateDeflections
Expand Down Expand Up @@ -278,13 +268,13 @@ def _filter_low_magnification(
-------
The points with an absolute magnification above the threshold.
"""
points = np.array(points)
points = jnp.array(points)
magnifications = tracer.magnification_2d_via_hessian_from(
grid=aa.Grid2DIrregular(points),
buffer=self.scale,
)
mask = np.abs(magnifications.array) > self.magnification_threshold
return np.where(mask[:, None], points, np.nan)
mask = jnp.abs(magnifications.array) > self.magnification_threshold
return jnp.where(mask[:, None], points, jnp.nan)

def _source_triangles(
self,
Expand Down
1 change: 0 additions & 1 deletion docs/installation/conda.rst
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,6 @@ For interferometer analysis there are two optional dependencies that must be ins
.. code-block:: bash

pip install pynufft
pip install pylops==2.3.1

**PyAutoLens** will run without these libraries and it is recommended that you only install them if you intend to
do interferometer analysis.
Expand Down
4 changes: 1 addition & 3 deletions docs/installation/overview.rst
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,4 @@ Dependencies

And the following optional dependencies:

**pynufft**: https://github.com/jyhmiinlin/pynufft

**PyLops**: https://github.com/PyLops/pylops
**pynufft**: https://github.com/jyhmiinlin/pynufft
1 change: 0 additions & 1 deletion docs/installation/pip.rst
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,6 @@ For interferometer analysis there are two optional dependencies that must be ins
.. code-block:: bash

pip install pynufft
pip install pylops==2.3.1

**PyAutoLens** will run without these libraries and it is recommended that you only install them if you intend to
do interferometer analysis.
Expand Down
1 change: 0 additions & 1 deletion docs/installation/source.rst
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,6 @@ For unit tests to pass you will also need the following optional requirements:
.. code-block:: bash

pip install pynufft
pip install pylops==2.3.1

If you are using a ``conda`` environment, add the source repository as follows:

Expand Down
11 changes: 0 additions & 11 deletions files/citations.bib
Original file line number Diff line number Diff line change
Expand Up @@ -33,17 +33,6 @@ @article{astropy2
Bdsk-Url-1 = {https://doi.org/10.3847/1538-3881/aabc4f}
}

@article{PyLops,
abstract = {Linear operators and optimisation are at the core of many algorithms used in signal and image processing, remote sensing, and inverse problems. For small to medium-scale problems, existing software packages (e.g., MATLAB, Python numpy and scipy) allow for explicitly building dense (or sparse) matrices and performing algebraic operations (e.g., computation of matrix-vector products and manipulation of matrices) with syntax that closely represents their corresponding analytical forms. However, many real application, large-scale operators do not lend themselves to explicit matrix representations, usually forcing practitioners to forego of the convenient linear-algebra syntax available for their explicit-matrix counterparts. PyLops is an open-source Python library providing a flexible and scalable framework for the creation and combination of so-called linear operators, class-based entities that represent matrices and inherit their associated syntax convenience, but do not rely on the creation of explicit matrices. We show that PyLops operators can dramatically reduce the memory load and CPU computations compared to explicit-matrix calculations, while still allowing users to seamlessly use their existing knowledge of compact matrix-based syntax that scales to any problem size because no explicit matrices are required.},
archivePrefix = {arXiv},
arxivId = {1907.12349},
author = {Ravasi, Matteo and Vasconcelos, Ivan},
eprint = {1907.12349},
file = {:home/jammy/Documents/Papers/Software/PyLops.pdf:pdf},
title = {{PyLops -- A Linear-Operator Python Library for large scale optimization}},
url = {http://arxiv.org/abs/1907.12349},
year = {2019}
}

@article{colossus,
abstract = {This paper introduces Colossus, a public, open-source python package for calculations related to cosmology, the large-scale structure (LSS) of matter in the universe, and the properties of dark matter halos. The code is designed to be fast and easy to use, with a coherent, well-documented user interface. The cosmology module implements Friedman-Lemaitre-Robertson-Walker cosmologies including curvature, relativistic species, and different dark energy equations of state, and provides fast computations of the linear matter power spectrum, variance, and correlation function. The LSS module is concerned with the properties of peaks in Gaussian random fields and halos in a statistical sense, including their peak height, peak curvature, halo bias, and mass function. The halo module deals with spherical overdensity radii and masses, density profiles, concentration, and the splashback radius. To facilitate the rapid exploration of these quantities, Colossus implements more than 40 different fitting functions from the literature. I discuss the core routines in detail, with particular emphasis on their accuracy. Colossus is available at bitbucket.org/bdiemer/colossus.},
Expand Down
1 change: 0 additions & 1 deletion files/citations.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@ This work uses the following software packages:
- `PyAutoFit` https://github.com/rhayes777/PyAutoFit [@pyautofit]
- `PyAutoGalaxy` https://github.com/Jammy2211/PyAutoGalaxy [@Nightingale2018] [@pyautogalaxy]
- `PyAutoLens` https://github.com/Jammy2211/PyAutoLens [@Nightingale2015] [@Nightingale2018] [@pyautolens]
- `PyLops` https://github.com/equinor/pylops [@pylops]
- `PyNUFFT` https://github.com/jyhmiinlin/pynufft [@pynufft]
- `PySwarms` https://github.com/ljvmiranda921/pyswarms [@pyswarms]
- `Python` https://www.python.org/ [@python]
Expand Down
3 changes: 0 additions & 3 deletions files/citations.tex
Original file line number Diff line number Diff line change
Expand Up @@ -54,9 +54,6 @@ \section*{Software Citations}
\href{https://github.com/Jammy2211/PyAutoLens}{\texttt{PyAutoLens}}
\citep{Nightingale2015, Nightingale2018, pyautolens}

\item
\href{https://github.com/equinor/pylops}{\textt{PyLops}}
\citep{pylops}

\item
\href{https://github.com/jyhmiinlin/pynufft}{\texttt{PyNUFFT}}
Expand Down
1 change: 0 additions & 1 deletion optional_requirements.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
numba
pylops>=1.10.0,<=2.3.1
pynufft
zeus-mcmc==2.5.4
getdist==1.4
Expand Down
12 changes: 1 addition & 11 deletions paper/paper.bib
Original file line number Diff line number Diff line change
Expand Up @@ -30,17 +30,7 @@ @article{astropy2
Volume = {156},
Year = 2018,
Bdsk-Url-1 = {https://doi.org/10.3847/1538-3881/aabc4f}}
@article{PyLops,
abstract = {Linear operators and optimisation are at the core of many algorithms used in signal and image processing, remote sensing, and inverse problems. For small to medium-scale problems, existing software packages (e.g., MATLAB, Python numpy and scipy) allow for explicitly building dense (or sparse) matrices and performing algebraic operations (e.g., computation of matrix-vector products and manipulation of matrices) with syntax that closely represents their corresponding analytical forms. However, many real application, large-scale operators do not lend themselves to explicit matrix representations, usually forcing practitioners to forego of the convenient linear-algebra syntax available for their explicit-matrix counterparts. PyLops is an open-source Python library providing a flexible and scalable framework for the creation and combination of so-called linear operators, class-based entities that represent matrices and inherit their associated syntax convenience, but do not rely on the creation of explicit matrices. We show that PyLops operators can dramatically reduce the memory load and CPU computations compared to explicit-matrix calculations, while still allowing users to seamlessly use their existing knowledge of compact matrix-based syntax that scales to any problem size because no explicit matrices are required.},
archivePrefix = {arXiv},
arxivId = {1907.12349},
author = {Ravasi, Matteo and Vasconcelos, Ivan},
eprint = {1907.12349},
file = {:home/jammy/Documents/Papers/Software/PyLops.pdf:pdf},
title = {{PyLops -- A Linear-Operator Python Library for large scale optimization}},
url = {http://arxiv.org/abs/1907.12349},
year = {2019}
}

@article{colossus,
abstract = {This paper introduces Colossus, a public, open-source python package for calculations related to cosmology, the large-scale structure (LSS) of matter in the universe, and the properties of dark matter halos. The code is designed to be fast and easy to use, with a coherent, well-documented user interface. The cosmology module implements Friedman-Lemaitre-Robertson-Walker cosmologies including curvature, relativistic species, and different dark energy equations of state, and provides fast computations of the linear matter power spectrum, variance, and correlation function. The LSS module is concerned with the properties of peaks in Gaussian random fields and halos in a statistical sense, including their peak height, peak curvature, halo bias, and mass function. The halo module deals with spherical overdensity radii and masses, density profiles, concentration, and the splashback radius. To facilitate the rapid exploration of these quantities, Colossus implements more than 40 different fitting functions from the literature. I discuss the core routines in detail, with particular emphasis on their accuracy. Colossus is available at bitbucket.org/bdiemer/colossus.},
archivePrefix = {arXiv},
Expand Down
4 changes: 1 addition & 3 deletions paper/paper.md
Original file line number Diff line number Diff line change
Expand Up @@ -160,8 +160,7 @@ effects like the telescope optics and background sky subtraction in the model-fi
performed directly on the observed visibilities in their native Fourier space, circumventing issues associated with the
incomplete sampling of the uv-plane that give rise to artefacts that can bias the inferred mass model and source
reconstruction in real-space. To make feasible the analysis of millions of visibilities, `PyAutoLens`
uses `PyNUFFT` [@pynufft] to fit the visibilities via a non-uniform fast Fourier transform and `PyLops` [@PyLops] to
express the memory-intensive linear algebra calculations as efficient linear operators [@Powell2020]. Creating
uses `PyNUFFT` [@pynufft] to fit the visibilities via a non-uniform fast Fourier transform. Creating
realistic simulations of imaging and interferometer strong lensing datasets is possible, as performed
by [@Alexander2019] [@Hermans2019] who used `PyAutoLens` to train neural networks to detect strong lenses.

Expand Down Expand Up @@ -198,7 +197,6 @@ taken without a local `PyAutoLens` installation.
- `numba` [@numba]
- `NumPy` [@numpy]
- `PyAutoFit` [@pyautofit]
- `PyLops` [@PyLops]
- `PyMultiNest` [@pymultinest] [@multinest]
- `PyNUFFT` [@pynufft]
- `pyprojroot` (https://github.com/chendaniely/pyprojroot)
Expand Down
65 changes: 0 additions & 65 deletions test_autolens/config/grids.yaml

This file was deleted.

1 change: 0 additions & 1 deletion test_autolens/config/notation.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,6 @@ label:
weight_power: W_{\rm p}
superscript:
ExternalShear: ext
InputDeflections: defl
Pixelization: pix
Point: point
Redshift: ''
Expand Down
6 changes: 3 additions & 3 deletions test_autolens/imaging/model/test_analysis_imaging.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,8 @@ def test__positions__resample__raises_exception(masked_imaging_7x7):


def test__positions__likelihood_overwrites__changes_likelihood(masked_imaging_7x7):
lens = al.Galaxy(redshift=0.5, mass=al.mp.IsothermalSph())
source = al.Galaxy(redshift=1.0, light=al.lp.SersicSph())
lens = al.Galaxy(redshift=0.5, mass=al.mp.IsothermalSph(centre=(0.05, 0.05)))
source = al.Galaxy(redshift=1.0, light=al.lp.SersicSph(centre=(0.05, 0.05)))

model = af.Collection(galaxies=af.Collection(lens=lens, source=source))

Expand All @@ -82,7 +82,7 @@ def test__positions__likelihood_overwrites__changes_likelihood(masked_imaging_7x
fit = al.FitImaging(dataset=masked_imaging_7x7, tracer=tracer)

assert fit.log_likelihood == pytest.approx(analysis_log_likelihood, 1.0e-4)
assert analysis_log_likelihood == pytest.approx(-6258.043397009, 1.0e-4)
assert analysis_log_likelihood == pytest.approx(-14.79034680979, 1.0e-4)

positions_likelihood = al.PositionsLHPenalty(
positions=al.Grid2DIrregular([(1.0, 100.0), (200.0, 2.0)]), threshold=0.01
Expand Down
4 changes: 2 additions & 2 deletions test_autolens/imaging/model/test_result_imaging.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
def test___linear_light_profiles_in_result(analysis_imaging_7x7):

galaxies = af.ModelInstance()
galaxies.galaxy = al.Galaxy(redshift=0.5, bulge=al.lp_linear.Sersic())
galaxies.galaxy = al.Galaxy(redshift=0.5, bulge=al.lp_linear.Sersic(centre=(0.05, 0.05)))

instance = af.ModelInstance()
instance.galaxies = galaxies
Expand All @@ -24,4 +24,4 @@ def test___linear_light_profiles_in_result(analysis_imaging_7x7):
)
assert result.max_log_likelihood_tracer.galaxies[
0
].bulge.intensity == pytest.approx(0.002310735, 1.0e-4)
].bulge.intensity == pytest.approx(0.1868684644, 1.0e-4)
Loading
Loading