diff --git a/.gitignore b/.gitignore index ee00577c47..25eb21a7a6 100644 --- a/.gitignore +++ b/.gitignore @@ -85,6 +85,7 @@ gammapy/gammapy.cfg docs/_generated docs/gen_modules docs/api +docs/sg_execution_times.rst .coverage dev/crab diff --git a/README.rst b/README.rst index 15bf159e32..2eb8965ab7 100644 --- a/README.rst +++ b/README.rst @@ -1,37 +1,43 @@ .. image:: https://anaconda.org/conda-forge/gammapy/badges/license.svg - :target: https://github.com/gammapy/gammapy/blob/master/LICENSE.rst - :alt: Licence + :target: https://github.com/gammapy/gammapy/blob/master/LICENSE.rst + :alt: Licence .. image:: http://img.shields.io/badge/powered%20by-AstroPy-orange.svg?style=flat - :target: http://www.astropy.org/ - + :target: http://www.astropy.org/ +| .. image:: https://anaconda.org/conda-forge/gammapy/badges/platforms.svg - :target: https://anaconda.org/conda-forge/gammapy - + :target: https://anaconda.org/conda-forge/gammapy +| .. image:: http://img.shields.io/pypi/v/gammapy.svg?text=version - :target: https://pypi.org/project/gammapy/ - :alt: Latest release + :target: https://pypi.org/project/gammapy/ + :alt: Latest release .. image:: https://img.shields.io/pypi/dm/gammapy?label=downloads%20%7C%20pip&logo=PyPI :alt: PyPI - Downloads .. image:: https://anaconda.org/conda-forge/gammapy/badges/version.svg - :target: https://anaconda.org/conda-forge/gammapy + :target: https://anaconda.org/conda-forge/gammapy .. image:: https://img.shields.io/conda/dn/conda-forge/gammapy?label=downloads%20%7C%20conda&logo=Conda-Forge - :target: https://anaconda.org/conda-forge/gammapy + :target: https://anaconda.org/conda-forge/gammapy .. image:: https://img.shields.io/badge/launch-binder-579aca.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXuGN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGduJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0d9zy05PPK3QzBamxdwtTCrzy
g/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7zItsbGyO9QlnxY0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAIZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC :target: mybinder.org - +| .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4701488.svg?style=flat - :target: https://doi.org/10.5281/zenodo.4701488 + :target: https://doi.org/10.5281/zenodo.4701488 .. image:: https://archive.softwareheritage.org/badge/origin/https://github.com/gammapy/gammapy/ - :target: https://archive.softwareheritage.org/browse/origin/?origin_url=https://github.com/gammapy/gammapy + :target: https://archive.softwareheritage.org/browse/origin/?origin_url=https://github.com/gammapy/gammapy +.. image:: https://archive.softwareheritage.org/badge/swh:1:snp:11e81660a96c3252c4afa4d48b58d1d2f78ea80a/ + :target: https://archive.softwareheritage.org/swh:1:snp:11e81660a96c3252c4afa4d48b58d1d2f78ea80a;origin=https://github.com/gammapy/gammapy +| .. image:: https://img.shields.io/badge/A&A-Published-Green.svg - :target: https://doi.org/10.1051/0004-6361/202346488 + :target: https://doi.org/10.1051/0004-6361/202346488 +| +.. image:: https://img.shields.io/badge/Matomo-3152A0?style=for-the-badge&logo=Matomo&logoColor=white + :target: https://matomo.org/ Gammapy ======= diff --git a/codemeta.json b/codemeta.json index e1912bd42f..61e9c96d32 100644 --- a/codemeta.json +++ b/codemeta.json @@ -253,18 +253,31 @@ } ], "codeRepository": "https://github.com/gammapy/gammapy", - "datePublished": "2024-02-29", + "dateCreated": "2013-08-19", + "datePublished": "2014-08-25", + "dateModified": "2024-02-29", "description": "Gammapy analyzes gamma-ray data and creates sky images, spectra and lightcurves, from event lists and instrument response information; it can also determine the position, morphology and spectra of gamma-ray sources. 
It is used to analyze data from H.E.S.S., Fermi-LAT, HAWC, and the Cherenkov Telescope Array (CTA).", "identifier": "https://doi.org/10.5281/zenodo.4701488", "keywords": [ "Astronomy", "Gamma-rays", - "Data analysis" + "Data analysis", + "Scientific/Engineering" ], "license": "https://spdx.org/licenses/BSD-3-Clause", "name": "Gammapy: Python toolbox for gamma-ray astronomy", "url": "https://gammapy.org/", "softwareVersion": "v1.2", + "applicationCategory": "Astronomy", + "applicationSubCategory": "Very-high-energy gamma rays", + "programmingLanguage": "Python 3", + "operatingSystem": "Linux, Mac OS, Windows", + "referencePublication": "https://doi.org/10.1051/0004-6361/202346488", + "audience": { + "@id": "/audience/science-research", + "@type": "Audience", + "audienceType": "Science/Research" + }, "maintainer": { "@id": "https://orcid.org/0000-0003-4568-7005", "@type": "Person", diff --git a/dev/codemeta.py b/dev/codemeta.py index e68b13e474..bc0a604a44 100644 --- a/dev/codemeta.py +++ b/dev/codemeta.py @@ -4,6 +4,7 @@ import logging from configparser import ConfigParser import click +from datetime import date log = logging.getLogger(__name__) @@ -24,6 +25,10 @@ def update_codemeta(maintainer, filename, setup_file=None): data["developmentStatus"] = ("active",) data["email"] = "GAMMAPY-COORDINATION-L@IN2P3.FR" + modified_date = str(date.today()) + data["dateModified"] = modified_date + + if setup_file: # complete with software requirements from setup.cfg cf = ConfigParser() diff --git a/dev/github_summary.py b/dev/github_summary.py index 9143966124..12ecbe900f 100644 --- a/dev/github_summary.py +++ b/dev/github_summary.py @@ -198,7 +198,7 @@ def list_merged_PRs(filename, milestones, from_backports): table = Table.read(filename) # Keep only merged PRs - table = table[table["is_merged"] is True] + table = table[table["is_merged"] == True] # Keep the requested milestones valid = np.zeros((len(table)), dtype="bool") diff --git a/docs/_static/custom.css b/docs/_static/custom.css index 9e42dfd437..a5ee0a3c22 100644 --- a/docs/_static/custom.css +++ b/docs/_static/custom.css @@ -88,7 +88,7 @@ p { color: white; } -#navbar-icon-links i.fa-twitter-square::before { +#navbar-icon-links i.fa-square-x-twitter::before { color: white; } diff --git a/docs/_static/matomo.js b/docs/_static/matomo.js new file mode 100644 index 0000000000..643bfb5216 --- /dev/null +++ b/docs/_static/matomo.js @@ -0,0 +1,15 @@ +// Licensed under a 3-clause BSD style license - see LICENSE.rst + +// Matomo Tracking Code + var _paq = window._paq = window._paq || []; + /* tracker methods like "setCustomDimension" should be called before "trackPageView" */ + _paq.push(['trackPageView']); + _paq.push(['enableLinkTracking']); + (function() { + var u="https://apcstatview.in2p3.fr/"; + _paq.push(['setTrackerUrl', u+'matomo.php']); + _paq.push(['setSiteId', '3']); + var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0]; + g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s); + })(); + diff --git a/docs/conf.py b/docs/conf.py index 535c02c0dd..86a2edbc91 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -26,6 +26,8 @@ # be accessible, and the documentation will not build correctly. 
import datetime +import sys +import os # Get configuration information from setup.cfg from configparser import ConfigParser @@ -35,7 +37,7 @@ from sphinx_astropy.conf import * # Sphinx-gallery config -from sphinx_gallery.sorting import ExplicitOrder, FileNameSortKey +from sphinx_gallery.sorting import ExplicitOrder # Load utils docs functions from gammapy.utils.docs import SubstitutionCodeBlock, gammapy_sphinx_ext_activate @@ -56,6 +58,8 @@ def setup(app): conf.read([os.path.join(os.path.dirname(__file__), "..", "setup.cfg")]) setup_cfg = dict(conf.items("metadata")) +sys.path.insert(0, os.path.dirname(__file__)) + linkcheck_anchors_ignore = [] linkcheck_ignore = [ "http://gamma-sky.net/#", @@ -70,6 +74,8 @@ def setup(app): "https://www.hawc-observatory.org/", # invalid certificate "https://ipython.org", # invalid certificate "https://jupyter.org", # invalid certificate + "https://hess-confluence.desy.de/confluence/display/HESS/HESS+FITS+data", # private page + "https://hess-confluence.desy.de/" ] # the buttons link to html pages which are auto-generated... @@ -111,7 +117,6 @@ def setup(app): # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns.append("_templates") -exclude_patterns.append("_static") exclude_patterns.append("**.ipynb_checkpoints") exclude_patterns.append("user-guide/model-gallery/*/*.ipynb") exclude_patterns.append("user-guide/model-gallery/*/*.md5") @@ -220,7 +225,7 @@ def setup(app): { "name": "Twitter", "url": "https://twitter.com/gammapyST", - "icon": "fab fa-twitter-square", + "icon": "fab fa-square-x-twitter", }, { "name": "Slack", @@ -234,6 +239,10 @@ def setup(app): }, "navbar_end": ["version-switcher", "theme-switcher", "navbar-icon-links"], "navigation_with_keys": True, + # footers + "footer_start": ["copyright"], + "footer_center": ["last-updated"], + "footer_end": ["sphinx-version", "theme-version"] } @@ -314,7 +323,7 @@ def setup(app): "exclude_implicit_doc": {}, "filename_pattern": r"\.py", "reset_modules": ("matplotlib",), - "within_subsection_order": FileNameSortKey, + "within_subsection_order": "sphinxext.TutorialExplicitOrder", "download_all_examples": True, "capture_repr": ("_repr_html_", "__repr__"), "nested_sections": False, @@ -328,11 +337,12 @@ def setup(app): } html_static_path = ["_static"] - -html_css_files = [ - "custom.css", -] +html_css_files = ["custom.css"] +html_js_files = ["matomo.js"] html_context = { "default_mode": "light", } + +# Add-on to insert the Matomo tracker +templates_path = ['_templates'] diff --git a/docs/development/dev_howto.rst b/docs/development/dev_howto.rst index 4f4d210c89..86450c64b9 100644 --- a/docs/development/dev_howto.rst +++ b/docs/development/dev_howto.rst @@ -70,9 +70,12 @@ Notes: Gammapy names defined here, to the names used in the formats. Of course, where formats are not set in stone yet, we advocate and encourage the use of the names chosen here. -* Finally, we realise that eventually probably CTA will define this, and Gammapy - is only a prototype. So if CTA chooses something else, probably we will follow - suite and do one more backward-incompatible change at some point to align with CTA. +* Finally, CTAO has proposed a full data model associated with the needed quantities, and + the community is working to create a data format for very-high-energy data produced by gamma-ray + and neutrino experiments within the open initiative `Very-high-energy Open Data Format`_. 
This format + aims to adhere to the CTAO data model, to respect the `FAIR principles`_ and to follow the + `IVOA`_ recommendations as much as possible. In order to handle past, current and future formats, + Gammapy should follow the `FAIR4RS principles`_ by proposing a user-friendly interface. Clobber or overwrite? +++++++++++++++++++++ diff --git a/docs/development/doc_howto.rst b/docs/development/doc_howto.rst index a08b676a34..dc62ac4eaa 100644 --- a/docs/development/doc_howto.rst +++ b/docs/development/doc_howto.rst @@ -144,7 +144,7 @@ following command. pytest --doctest-modules --ignore-glob=*/tests gammapy -If you get a zsh error try using putting to ignore block inside quotes +If you get a zsh error, try putting the ignore block inside quotes .. code-block:: bash @@ -157,6 +157,7 @@ The documentation built-in process uses the `sphinx-gallery `__, -please refer to it for more details. +please refer to it for more details. -The tooltip is the text that appears when you hover over the thumbnail. It is taken from the first line +The tooltip is the text that appears when you hover over the thumbnail. It is taken from the first line of the docstring of the tutorial. You can change it by editing the docstring. See e.g. `Analysis 1 Tutorial `__. @@ -194,7 +195,7 @@ Links in tutorials are just handled via normal RST syntax. Links to other tutorials ++++++++++++++++++++++++ -From docstrings and RST documentation files in Gammapy you can link to other tutorials +From docstrings and RST documentation files in Gammapy you can link to other tutorials and gallery examples by using RST syntax like this: .. code-block:: rst @@ -217,7 +218,7 @@ To make a reference to a heading within an RST file, first you need to define an ============== -The reference is the rendered as ``datasets``. +The reference is then rendered as ``datasets``. To link to this in the documentation you can use: .. code-block:: rst diff --git a/docs/development/release.rst b/docs/development/release.rst index 0977499520..bf558988b5 100644 --- a/docs/development/release.rst +++ b/docs/development/release.rst @@ -90,9 +90,8 @@ Steps for the day to announce the release: (decide on a case by case basis, if it's relevant to the group of people): * https://groups.google.com/forum/#!forum/astropy-dev - * https://lists.nasa.gov/mailman/listinfo/open-gamma-ray-astro - * CTA DATA list (cta-wp-dm@cta-observatory.org) - * hess-analysis + * CTAO AS WG list (cta-wg-as [at] cta-observatory [dot] org) + * hess-forum list (hess-forum [at] lsw.uni-heidelberg [dot] de) #. Make sure the release milestone and issue is closed on GitHub #. Update these release notes with any useful info / steps that you learned while making the release (ideally try to script / automate the task or check, diff --git a/docs/getting-started/index.rst b/docs/getting-started/index.rst index 0f57c3b72d..3bcce29737 100644 --- a/docs/getting-started/index.rst +++ b/docs/getting-started/index.rst @@ -45,7 +45,9 @@ environment using either conda or mamba.** Here are two methods to quickly insta Update existing version? Working with virtual environments? Installing a specific version? Check the advanced installation page. - .. button-ref:: install + +++ + + .. button-ref:: installation :ref-type: ref :click-parent: :color: secondary @@ -66,7 +68,7 @@ Tutorials Overview :link: ../tutorials/data/cta.html Gammapy can read and access data from multiple gamma-ray instruments. 
Data from -Imaging Atmospheric Cherenkov Telescopes, such as `CTA`_, `H.E.S.S.`_, `MAGIC`_ +Imaging Atmospheric Cherenkov Telescopes, such as `CTAO`_, `H.E.S.S.`_, `MAGIC`_ and `VERITAS`_, is typically accessed from the **event list data level**, called "DL3". This is most easily done using the `~gammapy.data.DataStore` class. In addition data can also be accessed from the **level of binned events and pre-reduced instrument response functions**, @@ -74,7 +76,7 @@ so called "DL4". This is typically the case for `Fermi-LAT`_ data or data from Water Cherenkov Observatories. This data can be read directly using the `~gammapy.maps.Map` and `~gammapy.irf.core.IRFMap` classes. -:bdg-link-primary:`CTA data tutorial <../tutorials/data/cta.html>` +:bdg-link-primary:`CTAO data tutorial <../tutorials/data/cta.html>` :bdg-link-primary:`HESS data tutorial <../tutorials/data/hess.html>` :bdg-link-primary:`Fermi-LAT data tutorial <../tutorials/data/fermi_lat.html>` diff --git a/docs/getting-started/usage.rst b/docs/getting-started/usage.rst index 3e2cf64b32..2232e27aa7 100644 --- a/docs/getting-started/usage.rst +++ b/docs/getting-started/usage.rst @@ -48,8 +48,7 @@ Gammapy is a Python package, so you can of course import and use it from Python: Type "help", "copyright", "credits" or "license" for more information. >>> from gammapy.stats import CashCountsStatistic >>> CashCountsStatistic(n_on=10, mu_bkg=4.2).sqrt_ts - 2.397918129147546 - + np.float64(2.397918129147546) IPython ------- diff --git a/docs/index.rst b/docs/index.rst index 2fe2c37e2e..1339b7d816 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -18,7 +18,7 @@ Gammapy Gammapy is a community-developed, open-source Python package for gamma-ray -astronomy built on Numpy, Scipy and Astropy. **It is the core library for the** `CTA`_ **Science Tools** +astronomy built on Numpy, Scipy and Astropy. **It is the core library for the** `CTAO`_ **Science Tools** but can also be used to analyse data from existing imaging atmospheric Cherenkov telescopes (IACTs), such as `H.E.S.S.`_, `MAGIC`_ and `VERITAS`_. It also provides some support for `Fermi-LAT`_ and `HAWC`_ data analysis. diff --git a/docs/references.txt b/docs/references.txt index 439821d8f3..5aa944d4fa 100644 --- a/docs/references.txt +++ b/docs/references.txt @@ -59,7 +59,7 @@ .. _Python for gamma-ray astronomy 2015: http://gammapy.github.io/PyGamma15/ -.. _CTA: https://www.cta-observatory.org/ +.. _CTAO: https://www.ctao.org/ .. _H.E.S.S.: https://www.mpi-hd.mpg.de/hfm/HESS/ .. _HAWC: https://www.hawc-observatory.org/ .. _VERITAS: https://veritas.sao.arizona.edu/ @@ -69,6 +69,7 @@ .. _FermiPy: https://github.com/fermiPy/fermipy .. _gadf: https://gamma-astro-data-formats.readthedocs.io/ .. _Gamma Astro Data Formats: https://gamma-astro-data-formats.readthedocs.io/ +.. _Very-high-energy Open Data Format: https://vodf.readthedocs.io/en/latest/index.html .. _April 2016 IACT data meeting: https://github.com/open-gamma-ray-astro/2016-04_IACT_DL3_Meeting/ @@ -135,3 +136,7 @@ .. _A Whirlwind tour of Python: https://nbviewer.org/github/jakevdp/WhirlwindTourOfPython/blob/master/Index.ipynb .. _Python data science handbook: https://jakevdp.github.io/PythonDataScienceHandbook/ .. _Astropy Hands-On Tutorial: https://github.com/Asterics2020-Obelics/School2019/tree/master/astropy + +.. _FAIR principles: https://www.go-fair.org/fair-principles/ +.. _FAIR4RS principles: https://www.rd-alliance.org/group_output/fair-principles-for-research-software-fair4rs-principles/ +.. 
_IVOA: https://ivoa.net/ \ No newline at end of file diff --git a/docs/sphinxext.py b/docs/sphinxext.py new file mode 100644 index 0000000000..d813cafc11 --- /dev/null +++ b/docs/sphinxext.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Licensed under a 3-clause BSD style license - see LICENSE.rst +# +# Gammapy documentation ordering configuration file. +# +# BaseExplicitOrder is a re-implementation of the private sphinx_gallery class `_SortKey`. +# + + +TUTORIAL_SORT_DICT = { + # **Tutorials** + # introductory + "overview.py": 0, + "analysis_1.py": 1, + "analysis_2.py": 2, + # data exploration + "hess.py": 0, + "cta.py": 1, + "fermi_lat.py": 2, + "hawc.py": 3, + # data analysis + # 1d + "cta_sensitivity.py": 0, + "spectral_analysis.py": 1, + "spectral_analysis_hli.py": 2, + "spectral_analysis_rad_max.py": 3, + "extended_source_spectral_analysis.py": 4, + "spectrum_simulation.py": 5, + "sed_fitting.py": 6, + "ebl.py": 7, + # 2d + "detect.py": 0, + "ring_background.py": 1, + "modeling_2D.py": 2, + # 3d + "analysis_3d.py": 0, + "cta_data_analysis.py": 1, + "energy_dependent_estimation.py": 2, + "analysis_mwl.py": 3, + "simulate_3d.py": 4, + "event_sampling.py": 5, + "event_sampling_nrg_depend_models.py": 6, + "flux_profiles.py": 7, + # time + "light_curve.py": 0, + "light_curve_flare.py": 1, + "variability_estimation.py": 2, + "time_resolved_spectroscopy.py": 3, + "light_curve_simulation.py": 4, + "pulsar_analysis.py": 5, + # api + "observation_clustering.py": 9, + "irfs.py": 0, + "maps.py": 4, + "mask_maps.py": 5, + "makers.py": 7, + "datasets.py": 6, + "models.py": 1, + "priors.py": 2, + "model_management.py": 10, + "fitting.py": 11, + "estimators.py": 12, + "catalog.py": 8, + "astro_dark_matter.py": 3, + # scripts + "survey_map.py": 0, +} + + +class BaseExplicitOrder: + """ + Base class inspired by sphinx_gallery _SortKey to customize sorting based on a + dictionary. The dictionary should contain the filename and its order in the + subsection. + """ + + def __repr__(self): + return f"<{self.__class__.__name__}>" + + def __init__(self, src_dir): + self.src_dir = src_dir + + +class TutorialExplicitOrder(BaseExplicitOrder): + """ + Class that handles the ordering of the tutorials in each gallery subsection. + """ + + sort_dict = TUTORIAL_SORT_DICT + + def __call__(self, filename): + if filename in self.sort_dict.keys(): + return self.sort_dict.get(filename) + else: + return 0 diff --git a/docs/user-guide/astro/darkmatter/index.rst b/docs/user-guide/astro/darkmatter/index.rst index 2509f74c84..daf81c33cb 100644 --- a/docs/user-guide/astro/darkmatter/index.rst +++ b/docs/user-guide/astro/darkmatter/index.rst @@ -84,7 +84,7 @@ gamLike `GamLike`_ contains likelihood functions for most leading gamma-ray indirect searches for dark matter, including Fermi-LAT observations of dwarfs and the Galactic Centre (GC), HESS observations of the GC, and projected sensitivities -for CTA observations of the GC. It is released in tandem with the `GAMBIT`_ +for CTAO observations of the GC. It is released in tandem with the `GAMBIT`_ module `DarkBit`_. DarkBit can be used for directly computing observables and likelihoods, for any combination of parameter values in some underlying particle model. diff --git a/docs/user-guide/datasets/index.rst b/docs/user-guide/datasets/index.rst index 44bb3e1fe1..19f73627b0 100644 --- a/docs/user-guide/datasets/index.rst +++ b/docs/user-guide/datasets/index.rst @@ -242,10 +242,15 @@ serialised as a `~gammapy.modeling.models.TemplateSpectralModel`. 
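A quick illustration of the ordering hook defined in ``docs/sphinxext.py`` above (a sketch, not part of the diff; it assumes ``docs/`` is importable, which ``conf.py`` arranges via ``sys.path.insert``): sphinx-gallery instantiates the sort key with the subsection source directory and calls it once per example file, so the integers in ``TUTORIAL_SORT_DICT`` directly define the gallery order.

.. code-block:: python

    from sphinxext import TutorialExplicitOrder

    # Files missing from TUTORIAL_SORT_DICT fall back to order 0.
    key = TutorialExplicitOrder(src_dir="examples/tutorials/starting")
    files = ["analysis_2.py", "overview.py", "analysis_1.py"]
    print(sorted(files, key=key))  # ['overview.py', 'analysis_1.py', 'analysis_2.py']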
Using gammapy.datasets ---------------------- -.. minigallery:: gammapy.datasets.MapDataset - :add-heading: +.. minigallery:: + :add-heading: Examples using `~gammapy.datasets.MapDataset` + + ../examples/tutorials/starting/analysis_1.py + ../examples/tutorials/analysis-3d/analysis_3d.py -.. minigallery:: gammapy.datasets.SpectrumDatasetOnOff - :add-heading: +.. minigallery:: + :add-heading: Examples using `~gammapy.datasets.SpectrumDataset` + + ../examples/tutorials/analysis-1d/spectral_analysis.py + ../examples/tutorials/analysis-1d/spectral_analysis_hli.py diff --git a/docs/user-guide/dl3.rst b/docs/user-guide/dl3.rst index e2a95a40bc..abea89a040 100644 --- a/docs/user-guide/dl3.rst +++ b/docs/user-guide/dl3.rst @@ -138,10 +138,10 @@ Writing event lists and GTIs to file ------------------------------------ To write the events or GTIs separately, one can just save the underlying -`astropy.table.Table`. To have an event file written in a correct DL3 format, it is +`astropy.table.Table`. To have an event file written in a correct DL3 format, it is necessary to utilise the ``write`` method available for `~gammapy.data.Observation`. It will write the `~gammapy.data.EventList` and their associated GTIs together in the -same FITS file according to the format specifications. To avoid writing IRFs along the +same FITS file according to the format specifications. To avoid writing IRFs along the ``EventList`` one has to set ``include_irfs`` to ``False``. See the example below: .. testcode:: @@ -164,8 +164,8 @@ same FITS file according to the format specifications. To avoid writing IRFs alo Using gammapy.data ------------------ -.. minigallery:: gammapy.data.EventList - :add-heading: +.. minigallery:: -.. minigallery:: gammapy.data.DataStore - :add-heading: + ../examples/tutorials/starting/overview.py + ../examples/tutorials/starting/analysis_1.py + ../examples/tutorials/api/observation_clustering.py diff --git a/docs/user-guide/estimators.rst b/docs/user-guide/estimators.rst index 21288af099..76bc6843d0 100644 --- a/docs/user-guide/estimators.rst +++ b/docs/user-guide/estimators.rst @@ -165,9 +165,9 @@ This how to compute flux maps with the `ExcessMapEstimator`: geom : WcsGeom axes : ['lon', 'lat', 'energy'] - shape : (320, 240, 2) + shape : (np.int64(320), np.int64(240), 2) ndim : 3 - unit : 1 / (cm2 s) + unit : 1 / (s cm2) dtype : float64 @@ -211,11 +211,22 @@ This is how to compute flux points: Using gammapy.estimators ------------------------ -.. minigallery:: gammapy.estimators.FluxPointsEstimator - :add-heading: +.. minigallery:: + + ../examples/tutorials/api/estimators.py + +.. minigallery:: + :add-heading: Examples using `~gammapy.estimators.FluxPointsEstimator` + + ../examples/tutorials/analysis-1d/spectral_analysis.py + ../examples/tutorials/analysis-3d/analysis_mwl.py + +.. minigallery:: + :add-heading: Examples using `~gammapy.estimators.LightCurveEstimator` + + ../examples/tutorials/analysis-time/light_curve.py + ../examples/tutorials/analysis-time/light_curve_flare.py -.. minigallery:: gammapy.estimators.LightCurveEstimator - :add-heading: .. _`likelihood SED type page`: https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/binned_likelihoods/index.html diff --git a/docs/user-guide/howto.rst b/docs/user-guide/howto.rst index d7ce187f7d..a058d500af 100644 --- a/docs/user-guide/howto.rst +++ b/docs/user-guide/howto.rst @@ -179,7 +179,7 @@ It is possible to combine Gammapy with astrophysical modeling codes, if they provide a Python interface. 
Usually this requires some glue code to be written, e.g. `~gammapy.modeling.models.NaimaSpectralModel` is an example of a Gammapy wrapper class around the Naima spectral model and radiation classes, which then -allows modeling and fitting of Naima models within Gammapy (e.g. using CTA, +allows modeling and fitting of Naima models within Gammapy (e.g. using CTAO, H.E.S.S. or Fermi-LAT data). .. accordion-footer:: @@ -327,7 +327,9 @@ Note that you can create your own style with matplotlib (see `here `__ and `here `__) -The CTA observatory released a document describing best practices for **data visualisation in a way friendly to +.. TODO: update the link + +CTAO released a document describing best practices for **data visualisation in a way friendly to color-blind people**: `CTAO document `_. To use them, you should add into your notebooks or scripts the following lines after the Gammapy imports: diff --git a/docs/user-guide/irf/index.rst b/docs/user-guide/irf/index.rst index 790cde35e0..34b1df59ab 100644 --- a/docs/user-guide/irf/index.rst +++ b/docs/user-guide/irf/index.rst @@ -21,7 +21,7 @@ We can write the expected number of detected events :math:`N(p, E)`: .. math:: - N(p, E) {\rm d}p {\rm d}E = + N(p, E) {\rm d}p {\rm d}E = t_{\rm obs} \int_{E_{\rm true}} {\rm d}E_{\rm true} \, \int_{p_{\rm true}} {\rm d}p_{\rm true} \, R(p, E|p_{\rm true}, E_{\rm true}) \times \Phi(p_{\rm true}, E_{\rm true}) where: @@ -47,11 +47,11 @@ where: of the detector collection area times its detection efficiency at true energy :math:`E_{\rm true}` and position :math:`p_{\rm true}`. * :math:`PSF(p|p_{\rm true}, E_{\rm true})` is the point spread function (unit: :math:`{\rm sr}^{-1}`). It gives the probability of measuring a direction :math:`p` when the true direction is :math:`p_{\rm true}` and the true energy is :math:`E_{\rm true}`. - Gamma-ray instruments consider the probability density of the angular separation between true and reconstructed directions + Gamma-ray instruments consider the probability density of the angular separation between true and reconstructed directions :math:`\delta p = p_{\rm true} - p`, i.e. :math:`PSF(\delta p|p_{\rm true}, E_{\rm true})`. * :math:`E_{\rm disp}(E|p_{\rm true}, E_{\rm true})` is the energy dispersion (unit: :math:`{\rm TeV}^{-1}`). It gives the probability to reconstruct the photon at energy :math:`E` when the true energy is :math:`E_{\rm true}` and the true position :math:`p_{\rm true}`. - Gamma-ray instruments consider the probability density of the migration :math:`\mu=\frac{E}{E_{\rm true}}`, + Gamma-ray instruments consider the probability density of the migration :math:`\mu=\frac{E}{E_{\rm true}}`, i.e. :math:`E_{\rm disp}(\mu|p_{\rm true}, E_{\rm true})`. The implicit assumption here is that energy dispersion and PSF are completely independent. This is not totally @@ -92,16 +92,10 @@ Variable Definition Using gammapy.irf ----------------- -.. minigallery:: gammapy.irf.PSFMap - :add-heading: +.. minigallery:: - -.. minigallery:: gammapy.irf.EDispKernelMap - :add-heading: - - -.. minigallery:: gammapy.irf.load_irf_dict_from_file - :add-heading: + ../examples/tutorials/api/irfs.py + ../examples/tutorials/analysis-1d/cta_sensitivity.py .. 
toctree:: diff --git a/docs/user-guide/makers/index.rst b/docs/user-guide/makers/index.rst index 734c8d7724..e67091add6 100644 --- a/docs/user-guide/makers/index.rst +++ b/docs/user-guide/makers/index.rst @@ -30,9 +30,22 @@ The definition of a safe data range is done using the `SafeMaskMaker` or manuall Using gammapy.makers -------------------- -.. minigallery:: gammapy.makers.MapDatasetMaker - :add-heading: +.. minigallery:: + ../examples/tutorials/api/makers.py + + +.. minigallery:: + :add-heading: Examples using `~gammapy.makers.SpectrumDatasetMaker` + + ../examples/tutorials/analysis-1d/spectral_analysis.py + ../examples/tutorials/analysis-1d/spectral_analysis_rad_max.py + ../examples/tutorials/analysis-1d/extended_source_spectral_analysis.py + + +.. minigallery:: + :add-heading: Examples using `~gammapy.makers.MapDatasetMaker` + + ../examples/tutorials/starting/analysis_1.py + ../examples/tutorials/data/hawc.py -.. minigallery:: gammapy.makers.SpectrumDatasetMaker - :add-heading: \ No newline at end of file diff --git a/docs/user-guide/maps/index.rst b/docs/user-guide/maps/index.rst index 6eb0244b48..9b783254e8 100644 --- a/docs/user-guide/maps/index.rst +++ b/docs/user-guide/maps/index.rst @@ -293,16 +293,11 @@ however, the user must ensure that the array has been correctly broadcasted. Using gammapy.maps ------------------ -.. minigallery:: gammapy.maps.WcsNDMap - :add-heading: +.. minigallery:: + ../examples/tutorials/api/maps.py + ../examples/tutorials/api/mask_maps.py -.. minigallery:: gammapy.maps.RegionNDMap - :add-heading: - - -.. minigallery:: gammapy.maps.HpxNDMap - :add-heading: .. toctree:: :maxdepth: 1 diff --git a/docs/user-guide/modeling.rst b/docs/user-guide/modeling.rst index 26f9a54dae..032f880952 100644 --- a/docs/user-guide/modeling.rst +++ b/docs/user-guide/modeling.rst @@ -49,8 +49,16 @@ Gammapy provides an easy interface to :ref:`custom-model`. Using gammapy.modeling ---------------------- -.. minigallery:: gammapy.modeling.Fit - :add-heading: +.. minigallery:: + + ../examples/tutorials/api/fitting.py + ../examples/tutorials/api/models.py + ../examples/tutorials/api/model_management.py + ../examples/tutorials/analysis-1d/spectral_analysis.py + ../examples/tutorials/analysis-3d/analysis_3d.py + ../examples/tutorials/analysis-3d/analysis_mwl.py + ../examples/tutorials/analysis-1d/sed_fitting.py + ../examples/tutorials/api/priors.py .. include:: ../references.txt diff --git a/docs/user-guide/package.rst b/docs/user-guide/package.rst index 242864800c..d6e0b985cd 100644 --- a/docs/user-guide/package.rst +++ b/docs/user-guide/package.rst @@ -42,7 +42,7 @@ the Gammapy API are explained in more detail in the following. Fig. 1 Data flow and sub-package structure of Gammapy. The folder icons represent the corresponding sub-packages. The direction of the data flow is illustrated with shaded arrows. The top section - shows the data levels as defined by `CTA`_. + shows the data levels as defined by `CTAO`_. Analysis steps -------------- diff --git a/docs/user-guide/references.rst b/docs/user-guide/references.rst index 576f673fab..76e00a2131 100644 --- a/docs/user-guide/references.rst +++ b/docs/user-guide/references.rst @@ -91,7 +91,7 @@ Glossary GTI Short for "good time interval": it indicates a continuous time interval of data - acquisition. In CTA, it also represents a time interval in which the IRFs are + acquisition. In the GADF DL3 format, it also represents a time interval in which the IRFs are supposed to be constant. 
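As an illustration of the GTI concept above (a sketch, not part of this diff; it assumes the public H.E.S.S. DL3 sample shipped with ``$GAMMAPY_DATA``), the good time intervals of an observation can be inspected directly:

.. code-block:: python

    from gammapy.data import DataStore

    data_store = DataStore.from_dir("$GAMMAPY_DATA/hess-dl3-dr1")
    obs = data_store.obs(23523)
    print(obs.gti)  # start/stop times delimiting the good time intervals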
IRF @@ -109,8 +109,12 @@ Glossary Gammapy utility classes performing data reduction of the DL3 data to binned datasets (DL4). See :ref:`makers` and the :ref:`data flow `. + MCMC + Short for "Markov chain Monte Carlo". See the + `following recipe `_. + MET - Short for "mission elapsed time". see also :ref:`MET_definition` in :ref:`time_handling`. + Short for "mission elapsed time". See also :ref:`MET_definition` in :ref:`time_handling`. PSF Short for "point spread function": it is the IRF representing the probability density of the angular separation @@ -137,6 +141,10 @@ Glossary object, the type of plot (e.g. :math:`dN/dE`, :math:`E^2\ dN/dE`) is typically adjusted through the `sed_type` quantity. See :ref:`sedtypes` for a list of options. +.. STI + Short for "stable time interval": it indicates a continuous time interval of data + acquisition for which the instrument response files are supposed to be constant. + Stacked Analysis In a stacked analysis individual observations are reduced to datasets which are then stacked to produce a single reduced dataset. The latter is then used diff --git a/docs/user-guide/scripts/index.rst b/docs/user-guide/scripts/index.rst index f89375e14e..3ce4c0421e 100644 --- a/docs/user-guide/scripts/index.rst +++ b/docs/user-guide/scripts/index.rst @@ -157,7 +157,7 @@ just import the functionality you need and call it, like this: >>> from gammapy.stats import CashCountsStatistic >>> CashCountsStatistic(n_on=10, mu_bkg=4.2).sqrt_ts - 2.397918129147546 + np.float64(2.397918129147546) If you imagine that the actual computation involves many lines of code (and not just a one-line function call), and that you need to do this computation diff --git a/docs/user-guide/utils.rst b/docs/user-guide/utils.rst index b9733ad3b5..dcb01e7dcd 100644 --- a/docs/user-guide/utils.rst +++ b/docs/user-guide/utils.rst @@ -39,12 +39,12 @@ In Gammapy, `astropy.time.Time` objects are used to represent times: Note that Astropy chose ``format='isot'`` and ``scale='utc'`` as default and in Gammapy these are also the recommended format and time scale. -.. warning:: +.. .. warning:: - Actually what's written here is not true. In CTA it hasn't been decided if +.. Actually what's written here is not true. In CTAO, it hasn't been decided if times will be in ``utc`` or ``tt`` (terrestrial time) format. - Here's a reminder that this needs to be settled / updated: +.. Here's a reminder that this needs to be settled / updated: https://github.com/gammapy/gammapy/issues/284 diff --git a/environment-dev.yml b/environment-dev.yml index be53f3a62e..c191b7aab9 100644 --- a/environment-dev.yml +++ b/environment-dev.yml @@ -9,7 +9,7 @@ name: gammapy-dev channels: - conda-forge - - sherpa + - https://cxc.cfa.harvard.edu/conda/sherpa variables: PYTHONNOUSERSITE: "1" @@ -77,7 +77,7 @@ dependencies: - pyinstrument - memray - pip: - - sherpa + - sherpa>=4.17 - pytest-sphinx - ray[default]>=2.9 - PyGithub diff --git a/examples/tutorials/analysis-1d/cta_sensitivity.py b/examples/tutorials/analysis-1d/cta_sensitivity.py index 3d755d3dd0..1317c8c431 100644 --- a/examples/tutorials/analysis-1d/cta_sensitivity.py +++ b/examples/tutorials/analysis-1d/cta_sensitivity.py @@ -2,12 +2,12 @@ Point source sensitivity ======================== -Estimate the CTA sensitivity for a point-like IRF at a fixed zenith angle and fixed offset. +Estimate the CTAO sensitivity for a point-like IRF at a fixed zenith angle and fixed offset. 
Introduction ------------ -This notebook explains how to estimate the CTA sensitivity for a +This notebook explains how to estimate the CTAO sensitivity for a point-like IRF at a fixed zenith angle and fixed offset, using the full containment IRFs distributed for the CTA 1DC. The significance is computed for a 1D analysis (ON-OFF regions) with the Li&Ma formula. @@ -21,6 +21,7 @@ - `~gammapy.estimators.SensitivityEstimator` """ + from cycler import cycler import numpy as np import astropy.units as u @@ -81,7 +82,7 @@ # Load IRFs and prepare dataset # ----------------------------- # -# We extract the 1D IRFs from the full 3D IRFs provided by CTA. +# We extract the 1D IRFs from the full 3D IRFs provided by CTAO. # irfs = load_irf_dict_from_file( @@ -153,7 +154,7 @@ # # We assume an alpha of 0.2 (ratio between ON and OFF area). We then run the sensitivity estimator. # -# These are the conditions imposed in standard CTA sensitivity computations. +# These are the conditions imposed in standard CTAO sensitivity computations. sensitivity_estimator = SensitivityEstimator( gamma_min=10, @@ -182,13 +183,11 @@ # - fig, ax = plt.subplots() ax.set_prop_cycle(cycler("marker", "s*v") + cycler("color", "rgb")) for criterion in ("significance", "gamma", "bkg"): - mask = sensitivity_table["criterion"] == criterion t = sensitivity_table[mask] @@ -264,7 +263,9 @@ # flux_points = FluxPoints.from_table( - sensitivity_table, sed_type="e2dnde", reference_model=sensitivity_estimator.spectrum + sensitivity_table, + sed_type="e2dnde", + reference_model=sensitivity_estimator.spectral_model, ) print( f"Integral sensitivity in {livetime:.2f} above {energy_axis.edges[0]:.2e} " diff --git a/examples/tutorials/analysis-1d/spectral_analysis.py b/examples/tutorials/analysis-1d/spectral_analysis.py index f12aeb9236..97943caac8 100644 --- a/examples/tutorials/analysis-1d/spectral_analysis.py +++ b/examples/tutorials/analysis-1d/spectral_analysis.py @@ -219,7 +219,7 @@ containment_correction=True, selection=["counts", "exposure", "edisp"] ) bkg_maker = ReflectedRegionsBackgroundMaker(exclusion_mask=exclusion_mask) -safe_mask_masker = SafeMaskMaker(methods=["aeff-max"], aeff_percent=10) +safe_mask_maker = SafeMaskMaker(methods=["aeff-max"], aeff_percent=10) # %%time datasets = Datasets() @@ -227,7 +227,7 @@ for obs_id, observation in zip(obs_ids, observations): dataset = dataset_maker.run(dataset_empty.copy(name=str(obs_id)), observation) dataset_on_off = bkg_maker.run(dataset, observation) - dataset_on_off = safe_mask_masker.run(dataset_on_off, observation) + dataset_on_off = safe_mask_maker.run(dataset_on_off, observation) datasets.append(dataset_on_off) print(datasets) diff --git a/examples/tutorials/analysis-1d/spectrum_simulation.py b/examples/tutorials/analysis-1d/spectrum_simulation.py index 4bb549e631..522f073c48 100644 --- a/examples/tutorials/analysis-1d/spectrum_simulation.py +++ b/examples/tutorials/analysis-1d/spectrum_simulation.py @@ -25,12 +25,12 @@ This can be done to check the feasibility of a measurement, to test whether fitted parameters really provide a good fit to the data etc. -Here we will see how to perform a 1D spectral simulation of a CTA +Here we will see how to perform a 1D spectral simulation of a CTAO observation, in particular, we will generate OFF observations following -the template background stored in the CTA IRFs. +the template background stored in the CTAO IRFs. 
**Objective: simulate a number of spectral ON-OFF observations of a -source with a power-law spectral model with CTA using the CTA 1DC +source with a power-law spectral model with CTAO using the CTA 1DC response, fit them with the assumed spectral model and check that the distribution of fitted parameters is consistent with the input values.** diff --git a/examples/tutorials/analysis-2d/detect.py b/examples/tutorials/analysis-2d/detect.py index 888840fb6d..52b8948f3d 100644 --- a/examples/tutorials/analysis-2d/detect.py +++ b/examples/tutorials/analysis-2d/detect.py @@ -130,7 +130,7 @@ # TS map estimation # ----------------- # -# The Test Statistic, TS = 2 ∆ log L (`Mattox et +# The Test Statistic, :math:`TS = 2 \Delta \log L` (`Mattox et # al. 1996 `__), # compares the likelihood function L optimized with and without a given # source. The TS map is computed by fitting by a single amplitude @@ -150,18 +150,47 @@ spectral_model = PowerLawSpectralModel(amplitude="1e-22 cm-2 s-1 keV-1", index=2) model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model) + +###################################################################### +# Here we show a full configuration of the estimator. We remind the user of the meaning +# of the various quantities: +# +# - ``model``: a `~gammapy.modeling.models.SkyModel` which is converted to a source model kernel +# - ``kernel_width``: the width for the above kernel +# - ``n_sigma``: number of sigma for the flux error +# - ``n_sigma_ul``: the number of sigma for the flux upper limits +# - ``selection_optional``: what optional maps to compute +# - ``n_jobs``: for running in parallel, the number of processes used for the computation +# - ``sum_over_energy_groups``: to sum over the energy groups or fit the `norm` on the full energy cube + + # %%time estimator = TSMapEstimator( - model, + model=model, kernel_width="1 deg", energy_edges=[10, 500] * u.GeV, + n_sigma=1, + n_sigma_ul=2, + selection_optional=None, + n_jobs=1, + sum_over_energy_groups=True, ) + + maps = estimator.run(dataset) +###################################################################### +# Accessing and visualising results +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# Below we print the result of the `~gammapy.estimators.TSMapEstimator`. We have access to a number of +# different quantities, as shown below. We can also access the quantity names +# through ``maps.available_quantities``. +# + +print(maps) ###################################################################### -# Plot resulting images -# ~~~~~~~~~~~~~~~~~~~~~ # fig, (ax1, ax2, ax3) = plt.subplots( @@ -179,6 +208,19 @@ ax3.set_title("Iteration map") plt.show() + +###################################################################### +# The flux in each pixel is obtained by multiplying a reference model with a +# normalisation factor: + +print(maps.reference_model) + +###################################################################### +# +maps.norm.plot(add_cbar=True, stretch="sqrt") +plt.show() + + ###################################################################### # Source candidates # ----------------- diff --git a/examples/tutorials/analysis-2d/modeling_2D.py b/examples/tutorials/analysis-2d/modeling_2D.py index 95cd689853..f1de504a39 100644 --- a/examples/tutorials/analysis-2d/modeling_2D.py +++ b/examples/tutorials/analysis-2d/modeling_2D.py @@ -64,7 +64,7 @@ # Now, we create a config file for our analysis. You may load this from # disc if you have a pre-defined config file. 
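######################################################################
# A sketch of that disc-loading path (an illustration, not part of this
# diff; the file name "my_config.yaml" is hypothetical): a configuration
# can be round-tripped through YAML with ``write`` and ``read``.

from gammapy.analysis import AnalysisConfig

config = AnalysisConfig()
# Save the default configuration to disc...
config.write("my_config.yaml", overwrite=True)
# ...and restore it, as you would with a pre-defined config file.
config = AnalysisConfig.read("my_config.yaml")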
# -# Here, we use 3 simulated CTA runs of the galactic center. +# Here, we use 3 simulated CTAO runs of the galactic center. # config = AnalysisConfig() diff --git a/examples/tutorials/analysis-3d/cta_data_analysis.py b/examples/tutorials/analysis-3d/cta_data_analysis.py index fdecc7637f..6baa312c3d 100644 --- a/examples/tutorials/analysis-3d/cta_data_analysis.py +++ b/examples/tutorials/analysis-3d/cta_data_analysis.py @@ -8,11 +8,11 @@ ------------ **This notebook shows an example how to make a sky image and spectrum -for simulated CTA data with Gammapy.** +for simulated CTAO data with Gammapy.** The dataset we will use is three observation runs on the Galactic Center. This is a tiny (and thus quick to process and play with and -learn) subset of the simulated CTA dataset that was produced for the +learn) subset of the simulated CTAO dataset that was produced for the first data challenge in August 2017. """ @@ -401,7 +401,7 @@ # What next? # ---------- # -# - This notebook showed an example of a first CTA analysis with Gammapy, +# - This notebook showed an example of a first CTAO analysis with Gammapy, # using simulated 1DC data. # - Let us know if you have any questions or issues! # diff --git a/examples/tutorials/analysis-3d/event_sampling_nrg_depend_models.py b/examples/tutorials/analysis-3d/event_sampling_nrg_depend_models.py index 36c5e4aac2..b9e9065e42 100644 --- a/examples/tutorials/analysis-3d/event_sampling_nrg_depend_models.py +++ b/examples/tutorials/analysis-3d/event_sampling_nrg_depend_models.py @@ -253,7 +253,7 @@ # Define an observation and make a dataset # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # -# In the following, we define an observation of 1 hr with CTA in the +# In the following, we define an observation of 1 hr with CTAO in the # alpha-configuration for the south array, and we also create a dataset # to be passed to the event sampler. The full `SkyModel` created above # is passed to the dataset. diff --git a/examples/tutorials/analysis-time/pulsar_analysis.py b/examples/tutorials/analysis-time/pulsar_analysis.py index c031536ad7..c52a63c2c1 100644 --- a/examples/tutorials/analysis-time/pulsar_analysis.py +++ b/examples/tutorials/analysis-time/pulsar_analysis.py @@ -331,7 +331,7 @@ ###################################################################### -# Note that here we are lacking statistic because we only use one run of CTA. +# Note that here we are lacking statistics because we only use one run of CTAO. # # Phase-resolved spectrum # ----------------------- diff --git a/examples/tutorials/analysis-time/Time_resolved_spectroscopy.py b/examples/tutorials/analysis-time/time_resolved_spectroscopy.py similarity index 97% rename from examples/tutorials/analysis-time/Time_resolved_spectroscopy.py rename to examples/tutorials/analysis-time/time_resolved_spectroscopy.py index 32359b2958..93cad308d2 100644 --- a/examples/tutorials/analysis-time/Time_resolved_spectroscopy.py +++ b/examples/tutorials/analysis-time/time_resolved_spectroscopy.py @@ -1,9 +1,6 @@ """ -A time resolved spectroscopy estimator -====================================== - -Aim ---- +Time resolved spectroscopy estimator +==================================== Perform spectral fits of a blazar in different time bins to investigate spectral changes during flares. 
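The central input of this tutorial is a list of ``(tstart, tstop)`` pairs; the ``create_table`` helper in the next hunk unpacks exactly that with ``np.array(time_intervals).T``. A sketch of building such bins (the start time and bin width below are illustrative only, not values from the tutorial):

.. code-block:: python

    import astropy.units as u
    from astropy.time import Time

    t0 = Time("2006-07-29T20:30:00")
    # ten consecutive 30-minute bins as (tstart, tstop) pairs
    time_intervals = [(t0 + i * 0.5 * u.h, t0 + (i + 1) * 0.5 * u.h) for i in range(10)]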
@@ -257,7 +254,6 @@ def time_resolved_spectroscopy(datasets, model, time_intervals): def create_table(time_intervals, fit_result): - t = QTable() t["tstart"] = np.array(time_intervals).T[0] @@ -325,11 +321,18 @@ def create_table(time_intervals, fit_result): # amplitude # +plt.errorbar( + table["amplitude"], + table["index"], + xerr=table["amplitude_err"], + yerr=table["index_err"], + linestyle=":", + linewidth=0.5, +) plt.scatter(table["amplitude"], table["index"], c=time_axis.center.value) -plt.plot(table["amplitude"], table["index"], linewidth=0.5) plt.xlabel("amplitude") plt.ylabel("index") -plt.colorbar() +plt.colorbar().set_label("time") plt.show() diff --git a/examples/tutorials/analysis-time/Variability_estimation.py b/examples/tutorials/analysis-time/variability_estimation.py similarity index 99% rename from examples/tutorials/analysis-time/Variability_estimation.py rename to examples/tutorials/analysis-time/variability_estimation.py index 60d7e3a54a..0dd59bf9d2 100644 --- a/examples/tutorials/analysis-time/Variability_estimation.py +++ b/examples/tutorials/analysis-time/variability_estimation.py @@ -38,7 +38,6 @@ # ----- # As usual, we’ll start with some general imports… - import numpy as np from astropy.stats import bayesian_blocks from astropy.time import Time diff --git a/examples/tutorials/api/estimators.py b/examples/tutorials/api/estimators.py new file mode 100644 index 0000000000..01cb717955 --- /dev/null +++ b/examples/tutorials/api/estimators.py @@ -0,0 +1,301 @@ +""" +Estimators +========== + +This tutorial provides an overview of the `Estimator` API. All estimators live in the +`gammapy.estimators` sub-module, offering a range of algorithms and classes for high-level flux and +significance estimation. A common API allows the estimation of +flux points, light curves, flux maps and profiles. + + + +Key Features +------------ + +- **Hypothesis Testing**: Estimations are based on testing a reference model + against a null hypothesis, deriving flux and significance values. + +- **Estimation via Two Methods**: + + - **Model Fitting (Forward Folding)**: Refit the flux of a model component + within specified energy, time, or spatial regions. + - **Excess Calculation (Backward Folding)**: Use the analytical solution by Li and Ma + for significance based on excess counts, currently available in `~gammapy.estimators.ExcessMapEstimator`. + +For further information on these details, please refer to :doc:`/user-guide/estimators`. + +The setup +--------- + +""" + +import numpy as np +import matplotlib.pyplot as plt +from astropy import units as u +from IPython.display import display +from gammapy.datasets import SpectrumDatasetOnOff, Datasets, MapDataset +from gammapy.estimators import ( + FluxPointsEstimator, + ExcessMapEstimator, + FluxPoints, +) +from gammapy.modeling import Fit, Parameter +from gammapy.modeling.models import SkyModel, PowerLawSpectralModel +from gammapy.utils.scripts import make_path + + +###################################################################### +# Flux Points Estimation +# ---------------------- +# +# We start with a simple example for flux points estimation taking multiple datasets into account. +# In this section we show the steps to estimate the flux points. +# First we read the pre-computed datasets from `$GAMMAPY_DATA`. 
+# + +datasets = Datasets() +path = make_path("$GAMMAPY_DATA/joint-crab/spectra/hess/") + +for filename in path.glob("pha_obs*.fits"): + dataset = SpectrumDatasetOnOff.read(filename) + datasets.append(dataset) ###################################################################### # Next we define a spectral model and set it on the datasets: # pwl = PowerLawSpectralModel(index=2.7, amplitude="5e-11 cm-2 s-1 TeV-1") datasets.models = SkyModel(spectral_model=pwl, name="crab") ###################################################################### # Before using the estimators, it is necessary to first ensure that the model is properly # fitted. This applies to all scenarios, including light curve estimation. To optimize the # model parameters to best fit the data, we utilise the following: # fit = Fit() fit_result = fit.optimize(datasets=datasets) print(fit_result) ###################################################################### # A fully configured Flux Points Estimation # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # The `~gammapy.estimators.FluxPointsEstimator` estimates flux points for a given list of datasets, # energies and spectral model. The simplest way to call the estimator is by defining both # the name of the ``source`` and its ``energy_edges``. # Here we prepare a full configuration of the flux point estimation. # Firstly we define the ``backend`` for the fit: # fit = Fit( optimize_opts={"backend": "minuit"}, confidence_opts={"backend": "scipy"}, ) ###################################################################### # Define the fully configured flux points estimator: # energy_edges = np.geomspace(0.7, 100, 9) * u.TeV norm = Parameter(name="norm", value=1.0) fp_estimator = FluxPointsEstimator( source="crab", energy_edges=energy_edges, n_sigma=1, n_sigma_ul=2, selection_optional="all", fit=fit, norm=norm, ) ###################################################################### # The ``norm`` parameter can be adjusted in a few different ways. For example, we can change the # minimum and maximum values that it scans over, as follows. # fp_estimator.norm.scan_min = 0.1 fp_estimator.norm.scan_max = 3 ###################################################################### # Note: The default scan range of the norm parameter is between 0.1 and 10. In case the upper # limit values lie outside this range, nan values will be returned. It may thus be useful to # increase this range, especially for the computation of upper limits from weak sources. # # The various quantities utilised in this tutorial are described here: # # - ``source``: which source from the model to compute the flux points for # - ``energy_edges``: edges of the flux points energy bins # - ``n_sigma``: number of sigma for the flux error # - ``n_sigma_ul``: the number of sigma for the flux upper limits # - ``selection_optional``: what additional maps to compute # - ``fit``: the fit instance (as defined above) # - ``reoptimize``: whether to reoptimize the flux points with other model parameters, aside from the ``norm`` # - ``norm``: normalisation parameter for the fit # # **Important note**: the output ``energy_edges`` are taken from the parent dataset energy bins, # selecting the bins closest to the requested ``energy_edges``. To match the input bins directly, # specific binning must be defined based on the parent dataset geometry. 
This could be done in the following way: # `energy_edges = datasets[0].geoms["geom"].axes["energy"].downsample(factor=5).edges` # # %%time fp_result = fp_estimator.run(datasets=datasets) ###################################################################### # Accessing and visualising the results # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ print(fp_result) ###################################################################### # We can specify the SED type to plot: # fp_result.plot(sed_type="dnde") plt.show() ###################################################################### # We can also access # the quantity names through ``fp_result.available_quantities``. # Here we show how you can plot a different plot type and define the axes units; # we also overlay the TS profile. ax = plt.subplot() ax.xaxis.set_units(u.eV) ax.yaxis.set_units(u.Unit("TeV cm-2 s-1")) fp_result.plot(ax=ax, sed_type="e2dnde", color="tab:orange") fp_result.plot_ts_profiles(sed_type="e2dnde") plt.show() ###################################################################### # The actual data members are N-dimensional `~gammapy.maps.region.ndmap.RegionNDMap` objects. So you can # also plot them: print(type(fp_result.dnde)) ###################################################################### # fp_result.dnde.plot() plt.show() ###################################################################### # From the above, we can see that we have access to many quantities. ###################################################################### # Access the data: print(fp_result.e2dnde.quantity.to("TeV cm-2 s-1")) ###################################################################### # print(fp_result.dnde.quantity.shape) ###################################################################### # print(fp_result.dnde.quantity[:, 0, 0]) ###################################################################### # Or even extract an energy range: fp_result.dnde.slice_by_idx({"energy": slice(3, 10)}) ###################################################################### # A note on the internal representation # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # The result contains a reference spectral model, which defines the spectral shape. # Typically, it is the best fit model: print(fp_result.reference_model) ###################################################################### # `~gammapy.estimators.FluxPoints` are represented by the "norm" scaling factor with # respect to the reference model: fp_result.norm.plot() plt.show() ###################################################################### # Dataset specific quantities ("counts like") # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # While the flux estimate and associated errors are common to all datasets, # the result also stores some dataset specific quantities, which can be useful # for debugging. # Here we remind the user of the meaning of the forthcoming quantities: # # - ``counts``: predicted counts from the null hypothesis, # - ``npred``: predicted number of counts from best fit hypothesis, # - ``npred_excess``: predicted number of excess counts from best fit hypothesis. 
+# +# The `~gammapy.maps.region.ndmap.RegionNDMap` allows for plotting of multidimensional data +# as well, by specifying the primary ``axis_name``: + + +fp_result.counts.plot(axis_name="energy") +plt.show() + +###################################################################### +# +fp_result.npred.plot(axis_name="energy") +plt.show() + +###################################################################### +# +fp_result.npred_excess.plot(axis_name="energy") +plt.show() + +###################################################################### +# Table conversion +# ~~~~~~~~~~~~~~~~ +# +# Flux points can be converted to tables: +# + +table = fp_result.to_table(sed_type="flux", format="gadf-sed") +display(table) + +###################################################################### +# +table = fp_result.to_table(sed_type="likelihood", format="gadf-sed", formatted=True) +display(table) + + +###################################################################### +# Common API +# ---------- +# In `GAMMAPY_DATA` we have access to other `~gammapy.estimators.FluxPoints` objects +# which have been created utilising the above method. Here we read the PKS 2155-304 light curve +# and create a `~gammapy.estimators.FluxMaps` object and show the data structure of such objects. +# We emphasize that these follow a very similar structure. +# + +###################################################################### +# Load the light curve for the PKS 2155-304 as a `~gammapy.estimators.FluxPoints` object. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# + +lightcurve = FluxPoints.read( + "$GAMMAPY_DATA/estimators/pks2155_hess_lc/pks2155_hess_lc.fits", format="lightcurve" +) + +display(lightcurve.available_quantities) + + +###################################################################### +# Create a `~gammapy.estimators.FluxMaps` object through one of the estimators. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# + +dataset = MapDataset.read("$GAMMAPY_DATA/cta-1dc-gc/cta-1dc-gc.fits.gz") +estimator = ExcessMapEstimator(correlation_radius="0.1 deg") +result = estimator.run(dataset) +display(result) + +###################################################################### +# +display(result.available_quantities) diff --git a/examples/tutorials/api/models.py b/examples/tutorials/api/models.py index e79a289c92..cec04729d8 100644 --- a/examples/tutorials/api/models.py +++ b/examples/tutorials/api/models.py @@ -294,7 +294,7 @@ ###################################################################### -# All spatial models have an associated sky region to it e.g. to +# All spatial models have an associated sky region to it e.g. to # illustrate the extension of the model on a sky image. The returned object # is an `~regions.SkyRegion` object: # @@ -319,7 +319,7 @@ ###################################################################### -# The `~gammapy.modeling.models.SpatialModel.to_region()` method can also be useful to write e.g. ds9 region +# The `~gammapy.modeling.models.SpatialModel.to_region()` method can also be useful to write e.g. ds9 region # files using `write_ds9` from the `regions` package: # @@ -444,7 +444,7 @@ # parameter resides on the `~gammapy.modeling.models.SpectralModel`, specifying a spectral # component is compulsory. The temporal and spatial components are # optional. The temporal model needs to be specified only for timing -# analysis. In some cases (e.g. when doing a spectral analysis) there is +# analysis. 
In some cases (e.g. when doing a spectral analysis) there is # no need for a spatial component either, and only a spectral model is # associated with the source. # @@ -456,7 +456,7 @@ ###################################################################### # Additionally the spatial model of `~gammapy.modeling.models.SkyModel` # can be used to represent source models based on templates, where the -# spatial and energy axes are correlated. It can be created e.g. from an +# spatial and energy axes are correlated. It can be created e.g. from an # existing FITS file: # @@ -531,7 +531,7 @@ ###################################################################### -# **Note:** To make the access by name unambiguous, models are required to +# **Note:** To make the access by name unambiguous, models are required to # have a unique name, otherwise an error will be thrown. # # To see which models are available you can use the ``.names`` attribute: @@ -661,7 +661,7 @@ class MyCustomSpectralModel(SpectralModel): """ tag = "MyCustomSpectralModel" - amplitude = Parameter("amplitude", "1e-12 cm-2 s-1 TeV-1", min=0, is_norm=True) + amplitude = Parameter("amplitude", "1e-12 cm-2 s-1 TeV-1", min=0) index = Parameter("index", 2, min=0) reference = Parameter("reference", "1 TeV", frozen=True) mean = Parameter("mean", "1 TeV", min=0) @@ -783,7 +783,6 @@ class MyCustomGaussianModel(SpatialModel): @staticmethod def evaluate(lon, lat, energy, lon_0, lat_0, sigma_1TeV, sigma_10TeV): - sep = angular_separation(lon, lat, lon_0, lat_0) # Compute sigma for the given energy using linear interpolation in log energy @@ -839,4 +838,4 @@ def evaluate(lon, lat, energy, lon_0, lat_0, sigma_1TeV, sigma_10TeV): @property def evaluation_radius(self): """Evaluation radius (`~astropy.coordinates.Angle`).""" - return 5 * np.max([self.sigma_1TeV.value, self.sigma_10TeV.value]) * u.deg \ No newline at end of file + return 5 * np.max([self.sigma_1TeV.value, self.sigma_10TeV.value]) * u.deg diff --git a/examples/tutorials/data/cta.py b/examples/tutorials/data/cta.py index ec36908e90..afc77daeb2 100644 --- a/examples/tutorials/data/cta.py +++ b/examples/tutorials/data/cta.py @@ -1,58 +1,58 @@ """ -CTA with Gammapy -================ +CTAO with Gammapy +================= -Access and inspect CTA data and instrument response functions (IRFs) using Gammapy. +Access and inspect CTAO data and instrument response functions (IRFs) using Gammapy. Introduction ------------ -The `Cherenkov Telescope Array -(CTA) `__ is the next generation +The `Cherenkov Telescope Array Observatory (CTAO) `__ is the next generation ground-based observatory for gamma-ray astronomy. Gammapy is the core -library for the Cherenkov Telescope Array (CTA) science tools +library for the Cherenkov Telescope Array Observatory (CTAO) science tools (`2017ICRC…35..766D `__ and `CTAO Press -Release `__). +Release `__). -CTA will start taking data in the coming years. For now, to learn how to -analyse CTA data and to use Gammapy, if you are a member of the CTA -consortium, you can use the simulated dataset from the CTA first data -challenge which ran in 2017 and 2018. +CTAO will start taking data in the coming years. 
For now, to learn how to
+analyse CTAO data and to use Gammapy, if you are a member of the CTAO
+consortium, you can use the simulated dataset from:

-- https://forge.in2p3.fr/projects/data-challenge-1-dc-1/wiki (CTA
-  internal)
+- the CTA first data challenge which ran in 2017 and 2018 (https://forge.in2p3.fr/projects/data-challenge-1-dc-1/wiki),
+- the CTAO Science Data Challenge of 2024 (https://ctaoobservatory.sharepoint.com/:f:/r/sites/share-open-data/Shared%20Documents/Reference%20datasets/Internal%20Science%20Data%20Challenge?csf=1&web=1&e=gNuFzI)

 Gammapy fully supports the FITS data formats (events, IRFs) used in CTA
-1DC. The XML sky model format is not supported, but are also not needed
+1DC and SDC. The XML sky model format is not supported, but is also not needed
 to analyse the data, you have to specify your model via the Gammapy YAML
 model format, or Python code, as shown below.

-You can use Gammapy to simulate CTA data and evaluate CTA performance
-using the CTA response files available here:
+You can also use Gammapy to simulate CTAO data and evaluate CTAO performance
+using the CTAO response files. Two sets of responses are available for different
+array layouts:

-- https://www.cta-observatory.org/science/cta-performance/
+- the Omega configuration (prod3b, 2016): https://zenodo.org/records/5163273,
+- the Alpha configuration (prod5, 2021): https://zenodo.org/records/5499840.
+
+Both are fully supported by Gammapy.

-The current FITS format `CTA-Performance-prod3b-v2-FITS.tar` is fully
-supported by Gammapy, as shown below.

 Tutorial overview
 -----------------

-This notebook shows how to access CTA data and instrument response
+This notebook shows how to access CTAO data and instrument response
 functions (IRFs) using Gammapy, and gives some examples how to quick
-look the content of CTA files, especially to see the shape of CTA IRFs.
+look the content of CTAO files, especially to see the shape of CTAO IRFs.
 At the end of the notebooks, we give several links to other tutorial
-notebooks that show how to simulate CTA data and how to evaluate CTA
-observability and sensitivity, or how to analyse CTA data.
+notebooks that show how to simulate CTAO data and how to evaluate CTAO
+observability and sensitivity, or how to analyse CTAO data.

-Note that the FITS data and IRF format currently used by CTA is the one
+Note that the FITS data and IRF format currently used by CTAO is the one
 documented at https://gamma-astro-data-formats.readthedocs.io/, and is
-also used by H.E.S.S. and other imaging atmospheric Cherenkov telescopes
+also used by H.E.S.S. and other Imaging Atmospheric Cherenkov Telescopes
 (IACTs). So if you see other Gammapy tutorials using e.g. H.E.S.S.
-example data, know that they also apply to CTA, all you have to do is to
-change the loaded data or IRFs to CTA.
+example data, know that they also apply to CTAO; all you have to do is to
+change the loaded data or IRFs to CTAO.

 Setup
 -----

@@ -95,18 +95,18 @@
 # After download, follow the instructions how to `untar` the files, and
 # set a `CTADATA` environment variable to point to the data. 
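As a minimal sketch of that step (the path below is a placeholder for wherever you untarred the files; the ``index/gps`` layout follows the 1DC convention used in this tutorial):

```python
import os

from gammapy.data import DataStore

# Placeholder: point CTADATA at your local copy of the 1DC files.
os.environ["CTADATA"] = "/path/to/cta-1dc"

# The 1DC index files are organised per survey; "gps" is the Galactic
# plane survey used in the examples below.
data_store = DataStore.from_dir("$CTADATA/index/gps")
print(data_store)
```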
# -# For convenience, since the 1DC data files are large, and not publicly +# **For convenience**, since the 1DC data files are large and not publicly # available to anyone, we have taken a tiny subset of the CTA 1DC data, # four observations with the southern array from the GPS survey, pointing -# near the Galactic center, and included them at `$GAMMAPY_DATA/cta-1dc` +# near the Galactic center, and **included them at `$GAMMAPY_DATA/cta-1dc`** # which you get via `gammapy download datasets`. # # Files # ~~~~~ # # Next we will show a quick overview of the files and how to load them, -# and some quick look plots showing the shape of the CTA IRFs. How to do -# CTA simulations and analyses is shown in other tutorials, see links at +# and some quick look plots showing the shape of the CTAO IRFs. How to do +# CTAO simulations and analyses is shown in other tutorials, see links at # the end of this notebook. # @@ -164,9 +164,9 @@ # equivalently via the `~gammapy.data.EventList` class by specifying the # EVENTS filename. # -# The quick-look `events.peek()` plot below shows that CTA has a field +# The quick-look `events.peek()` plot below shows that CTAO has a field # of view of a few degrees, and two energy thresholds, one significantly -# below 100 GeV where the CTA large-size telescopes (LSTs) detect events, +# below 100 GeV where the CTAO large-size telescopes (LSTs) detect events, # and a second one near 100 GeV where the mid-sized telescopes (MSTs) # start to detect events. # @@ -206,7 +206,7 @@ # IRFs # ---- # -# The CTA instrument response functions (IRFs) are given as FITS files in +# The CTAO instrument response functions (IRFs) are given as FITS files in # the `caldb` folder, the following IRFs are available: # # - effective area @@ -216,12 +216,12 @@ # # Notes: # -# - The IRFs contain the energy and offset dependence of the CTA response -# - CTA 1DC was based on an early version of the CTA FITS responses +# - The IRFs contain the energy and offset dependence of the CTAO response +# - CTA 1DC was based on an early version of the CTAO FITS responses # produced in 2017, improvements have been made since. # - The point spread function was approximated by a Gaussian shape # - The background is from hadronic and electron air shower events that -# pass CTA selection cuts. It was given as a function of field of view +# pass CTAO selection cuts. It was given as a function of field of view # coordinates, although it is radially symmetric. # - The energy dispersion in CTA 1DC is noisy at low energy, leading to # unreliable spectral points for some analyses. @@ -355,28 +355,28 @@ ###################################################################### -# CTA performance files -# --------------------- +# Latest CTAO performance files +# ----------------------------- # -# CTA 1DC is useful to learn how to analyse CTA data. But to do -# simulations and studies for CTA now, you should get the most recent CTA -# IRFs in FITS format from -# https://www.cta-observatory.org/science/cta-performance/ +# CTA 1DC is useful to learn how to analyse CTAO data. But to do +# simulations and studies for CTAO now, you should get the most recent CTAO +# IRFs in FITS format from https://www.ctao.org/for-scientists/performance/. # -# If you want to run the download and examples in the next code cells, -# remove the # to uncomment. +# If you want to use other response files, the following code cells (remove the # to uncomment) +# explain how to proceed. This example is made with the Alpha configuration (Prod5). 
# -# !curl -O https://www.cta-observatory.org/wp-content/uploads/2019/04/CTA-Performance-prod3b-v2-FITS.tar.gz - -# !tar xf CTA-Performance-prod3b-v2-FITS.tar.gz +# !curl -o cta-prod5-zenodo-fitsonly-v0.1.zip https://zenodo.org/records/5499840/files/cta-prod5-zenodo-fitsonly-v0.1.zip +# !unzip cta-prod5-zenodo-fitsonly-v0.1.zip +# !ls fits/ -# !ls caldb/data/cta/prod3b-v2/bcf +# !tar xf fits/CTA-Performance-prod5-v0.1-South-40deg.FITS.tar.gz -C fits/. +# !ls fits/*.fits.gz -# irfs1 = load_irf_dict_from_file("caldb/data/cta/prod3b-v2/bcf/South_z20_50h/irf_file.fits") +# irfs1 = load_irf_dict_from_file("fits/Prod5-South-40deg-SouthAz-14MSTs37SSTs.180000s-v0.1.fits.gz") # irfs1["aeff"].plot_energy_dependence() -# irfs2 = load_irf_dict_from_file("caldb/data/cta/prod3b-v2/bcf/South_z40_50h/irf_file.fits") +# irfs2 = load_irf_dict_from_file("fits/Prod5-South-40deg-SouthAz-14MSTs37SSTs.1800s-v0.1.fits.gz") # irfs2["aeff"].plot_energy_dependence() @@ -391,11 +391,11 @@ # - Use `~gammapy.data.EventList.pointing_radec` to find the pointing position of this # observation, and use `astropy.coordinates.SkyCoord` methods to find # the field of view offset of the highest-energy event. -# - What is the effective area and PSF 68% containment radius of CTA at 1 +# - What is the effective area and PSF 68% containment radius of CTAO at 1 # TeV for the `South_z20_50h` configuration used for the CTA 1DC # simulation? -# - Get the latest CTA FITS performance files from -# https://www.cta-observatory.org/science/cta-performance/ and run the +# - Get the latest CTAO FITS performance files from +# https://www.ctao.org/for-scientists/performance/ and run the # code example above. Make an effective area ratio plot of 40 deg # zenith versus 20 deg zenith for the `South_z40_50h` and # `South_z20_50h` configurations. @@ -412,7 +412,7 @@ # :doc:`/tutorials/starting/analysis_1` and # :doc:`/tutorials/starting/analysis_2` or any other # Gammapy analysis tutorial. -# - Learn how to evaluate CTA observability and sensitivity with +# - Learn how to evaluate CTAO observability and sensitivity with # :doc:`/tutorials/analysis-3d/simulate_3d`, # :doc:`/tutorials/analysis-1d/spectrum_simulation` # or :doc:`/tutorials/analysis-1d/cta_sensitivity`. diff --git a/examples/tutorials/data/hawc.py b/examples/tutorials/data/hawc.py index e3c85fa87b..6546ca2125 100644 --- a/examples/tutorials/data/hawc.py +++ b/examples/tutorials/data/hawc.py @@ -1,7 +1,12 @@ """ HAWC with Gammapy -===================== -Explore HAWC event lists and IRFs and perform the data reduction steps. +================= + +Explore HAWC event lists and instrument response functions (IRFs), then perform the +data reduction steps. + +Introduction +------------ `HAWC `__ is an array of water Cherenkov detectors located in Mexico that detects gamma-rays @@ -15,20 +20,25 @@ was publicly `released `__. This dataset is 1 GB in size, so only a subset will be used here as an example. +Tutorial overview +----------------- + This notebook is a quick introduction to HAWC data analysis with Gammapy. It briefly describes the HAWC data and how to access it, and then uses a -subset of it to produce a MapDataset, to show how the data reduction is performed. +subset of the data to produce a `~gammapy.datasets.MapDataset`, to show how the +data reduction is performed. 
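Before diving into the details, a compact sketch of the reduction pattern that this tutorial builds up step by step may help orient the reader. ``geom``, ``energy_axis_true`` and ``obs`` are placeholders constructed properly further below, and the tutorial additionally sets the livetime metadata before the safe-mask step:

```python
from gammapy.datasets import MapDataset
from gammapy.makers import MapDatasetMaker, SafeMaskMaker

# Generic Gammapy reduction pattern, specialised for HAWC in this tutorial:
# an empty dataset defined by a geometry is filled by makers from one observation.
maker = MapDatasetMaker(selection=["counts", "background", "exposure", "edisp", "psf"])
safe_mask_maker = SafeMaskMaker(methods=["aeff-max"], aeff_percent=10)

# geom, energy_axis_true and obs are placeholders, defined below.
empty = MapDataset.create(geom, energy_axis_true=energy_axis_true, reco_psf=True)
dataset = safe_mask_maker.run(maker.run(empty, obs))
```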
-The HAWC data release contains events where energy is estimated using
-two different algorithms, referred to as "NN" and "GP" (see this `paper `__
+The HAWC data release contains events where the energy is estimated using
+two different algorithms, referred to as "NN" and "GP" (see this
+`paper <https://arxiv.org/abs/1905.12518>`__
 for a detailed description). These two event classes are not independent, meaning that
-events are repeated between the NN and GP datasets. So these data should never
+events are repeated between the NN and GP datasets. Therefore, these data should never
 be analyzed jointly, and one of the two estimators needs to be chosen before
 proceeding.

-Once the data has been reduced to a MapDataset, there are no differences
+Once the data has been reduced to a `~gammapy.datasets.MapDataset`, there are no differences
 in the way that HAWC data is handled with respect to data from any other
-observatory, such as H.E.S.S. or CTA.
+observatory, such as H.E.S.S. or CTAO.

 HAWC data access and reduction
@@ -61,7 +71,7 @@

 ######################################################################
-# A useful way to organize the relevant files are the index tables. The
+# A useful way to organize the relevant files is with index tables. The
 # observation index table contains information on each particular observation,
 # such as the run ID. The HDU index table has a row per
 # relevant file (i.e., events, effective area, psf…) and contains the path
@@ -80,21 +90,28 @@

 ######################################################################
 # Load the tables
+# ~~~~~~~~~~~~~~~

 data_path = "$GAMMAPY_DATA/hawc/crab_events_pass4/"
 hdu_filename = f"hdu-index-table-{energy_estimator}-Crab.fits.gz"
 obs_filename = f"obs-index-table-{energy_estimator}-Crab.fits.gz"

-# there is only one observation table
+######################################################################
+# There is only one observation table
 obs_table = ObservationTable.read(data_path + obs_filename)

-# we read the hdu index table of fHit bin number 6
+######################################################################
+# The remainder of this tutorial utilises just one fHit value; however,
+# for a regular analysis you should combine all fHit bins. Here,
+# we utilise fHit bin number 6. We start by reading the HDU index table
+# of this fHit bin:
+
 fHit = 6
 hdu_table = HDUIndexTable.read(data_path + hdu_filename, hdu=fHit)

 ######################################################################
-# From the tables, we can create a Datastore
+# From the tables, we can create a `~gammapy.data.DataStore`. 
data_store = DataStore(hdu_table=hdu_table, obs_table=obs_table) @@ -107,31 +124,31 @@ obs = data_store.get_observations()[0] ###################################################################### -# Select and peek events +# Peek events from this observation obs.events.peek() plt.show() ###################################################################### -# Peek the energy dispersion +# Peek the energy dispersion: obs.edisp.peek() plt.show() ###################################################################### -# Peek the psf +# Peek the psf: obs.psf.peek() plt.show() ###################################################################### -# Peek the background for one transit +# Peek the background for one transit: plt.figure() obs.bkg.reduce_over_axes().plot(add_cbar=True) plt.show() ###################################################################### -# Peek the effective exposure for one transit +# Peek the effective exposure for one transit: plt.figure() obs.aeff.reduce_over_axes().plot(add_cbar=True) plt.show() @@ -141,32 +158,35 @@ # Data reduction into a MapDataset # -------------------------------- # -# We will now produce a MapDataset using the data from one of the +# We will now produce a `~gammapy.datasets.MapDataset` using the data from one of the # fHit bins. In order to use all bins, one just needs to repeat this -# process inside of a for loop that modifies the variable fHit. +# process inside a for loop that modifies the variable fHit. ###################################################################### -# First we define the geometry and axes +# First we define the geometry and axes, starting with the energy reco axis: -# create the energy reco axis -# Note that this axis is the one used to create the background model map. If -# different edges are used, the MapDatasetMaker will interpolate between -# them, which might lead to unexpected behavior. energy_axis = MapAxis.from_edges( [1.00, 1.78, 3.16, 5.62, 10.0, 17.8, 31.6, 56.2, 100, 177, 316] * u.TeV, name="energy", interp="log", ) +###################################################################### +# Note: this axis is the one used to create the background model map. If +# different edges are used, the `~gammapy.makers.MapDatasetMaker` will interpolate between +# them, which might lead to unexpected behavior. + +###################################################################### +# Define the energy true axis: -# and energy true axis energy_axis_true = MapAxis.from_energy_bounds( 1e-3, 1e4, nbin=140, unit="TeV", name="energy_true" ) +###################################################################### +# Finally, create a geometry around the Crab location: -# create a geometry around the Crab location geom = WcsGeom.create( skydir=SkyCoord(ra=83.63, dec=22.01, unit="deg", frame="icrs"), width=6 * u.deg, @@ -176,30 +196,31 @@ ###################################################################### -# Define the makers we will use +# Define the makers we will use: maker = MapDatasetMaker(selection=["counts", "background", "exposure", "edisp", "psf"]) -safemask_maker = SafeMaskMaker(methods=["aeff-max"], aeff_percent=10) +safe_mask_maker = SafeMaskMaker(methods=["aeff-max"], aeff_percent=10) ###################################################################### -# Create empty Mapdataset -# The keyword reco_psf=True is needed because the HAWC PSF is +# Create an empty `~gammapy.datasets.MapDataset`. +# The keyword ``reco_psf=True`` is needed because the HAWC PSF is # derived in reconstructed energy. 
dataset_empty = MapDataset.create( geom, energy_axis_true=energy_axis_true, name="fHit " + str(fHit), reco_psf=True ) -# run the map dataset maker dataset = maker.run(dataset_empty, obs) -# The livetime info is used by the SafeMask maker to retrieve the +###################################################################### +# The livetime information is used by the `~gammapy.makers.SafeMaskMaker` to retrieve the # effective area from the exposure. The HAWC effective area is computed -# for one source transit above 45º zenith, which is around 6h -# Note that since the effective area condition used here is relative to -# the maximum, this value does not actually impact the result +# for one source transit above 45º zenith, which is around 6h. +# Since the effective area condition used here is relative to +# the maximum, this value does not actually impact the result. + dataset.exposure.meta["livetime"] = "6 h" -dataset = safemask_maker.run(dataset) +dataset = safe_mask_maker.run(dataset) ###################################################################### @@ -207,12 +228,13 @@ # one single transit, but our dataset might comprise more. The number # of transits can be derived using the good time intervals (GTI) stored # with the event list. For convenience, the HAWC data release already -# included this quantity as a map +# included this quantity as a map. transit_map = Map.read(data_path + "irfs/TransitsMap_Crab.fits.gz") transit_number = transit_map.get_by_coord(geom.center_skydir) -# Correct the quantities +###################################################################### +# Correct the background and exposure quantities: dataset.background.data *= transit_number dataset.exposure.data *= transit_number @@ -221,17 +243,14 @@ # Check the dataset we produced # ----------------------------- # -# We will now check the contents of the dataset - - -###################################################################### -# We can use the .peek() method to quickly get a glimpse of the contents +# We will now check the contents of the dataset. +# We can use the ``.peek()`` method to quickly get a glimpse of the contents dataset.peek() plt.show() ###################################################################### -# And make significance maps to check that the Crab is visible +# Create significance maps to check that the Crab is visible: excess_estimator = ExcessMapEstimator( correlation_radius="0.2 deg", selection_optional=[], energy_edges=energy_axis.edges @@ -245,6 +264,7 @@ ###################################################################### # Combining all energies + excess_estimator_integrated = ExcessMapEstimator( correlation_radius="0.2 deg", selection_optional=[] ) @@ -269,6 +289,6 @@ # ---------- # # Now you know how to access and work with HAWC data. All other -# tutorials and documentation concerning 3D analysis and MapDatasets +# tutorials and documentation concerning 3D analysis and `~gammapy.datasets.MapDataset`s # can be used from this step. # diff --git a/examples/tutorials/data/hess.py b/examples/tutorials/data/hess.py index e86583a857..8337b12e3f 100644 --- a/examples/tutorials/data/hess.py +++ b/examples/tutorials/data/hess.py @@ -3,6 +3,10 @@ ===================== Explore H.E.S.S. event lists and IRFs. + +Introduction +------------ + `H.E.S.S. `__ is an array of gamma-ray telescopes located in Namibia. Gammapy is regularly used and fully supports H.E.S.S. high level data analysis, after export to the @@ -10,7 +14,7 @@ format `__. The H.E.S.S. 
data is private, and H.E.S.S. analysis is mostly documented -and discussed at https://hess-confluence.desy.de/ and in +and discussed in the internal Wiki pages and in H.E.S.S.-internal communication channels. However, in 2018, a small sub-set of archival H.E.S.S. data was publicly released, called the `H.E.S.S. DL3 @@ -26,9 +30,8 @@ H.E.S.S. members can find details on the DL3 FITS production on this `Confluence -page `__ -and access more detailed tutorials in this -`repository `__ +page `__ +and access more detailed tutorials in the BitBucket `hess-open-tools` repository. DL3 DR1 ------- @@ -146,6 +149,6 @@ # ---------- # # Now you know how to access and work with H.E.S.S. data. All other -# tutorials and documentation apply to H.E.S.S. and CTA or any other IACT +# tutorials and documentation apply to H.E.S.S. and CTAO or any other IACT # that provides DL3 data and IRFs in the standard format. # diff --git a/gammapy/astro/darkmatter/spectra.py b/gammapy/astro/darkmatter/spectra.py index fd6f302bf9..3b14613614 100644 --- a/gammapy/astro/darkmatter/spectra.py +++ b/gammapy/astro/darkmatter/spectra.py @@ -1,5 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst """Dark matter spectra.""" + import numpy as np import astropy.units as u from astropy.table import Table @@ -70,7 +71,6 @@ class PrimaryFlux(TemplateNDSpectralModel): tag = ["PrimaryFlux", "dm-pf"] def __init__(self, mDM, channel): - self.table_path = make_path(self.table_filename) if not self.table_path.exists(): raise FileNotFoundError( @@ -213,7 +213,6 @@ class DarkMatterAnnihilationSpectralModel(SpectralModel): unit="", interp="log", ) - scale._is_norm = True tag = ["DarkMatterAnnihilationSpectralModel", "dm-annihilation"] def __init__(self, mass, channel, scale=scale.quantity, jfactor=1, z=0, k=2): @@ -321,7 +320,6 @@ class DarkMatterDecaySpectralModel(SpectralModel): unit="", interp="log", ) - scale._is_norm = True tag = ["DarkMatterDecaySpectralModel", "dm-decay"] diff --git a/gammapy/catalog/fermi.py b/gammapy/catalog/fermi.py index 4a61e85cef..d942cdfc92 100644 --- a/gammapy/catalog/fermi.py +++ b/gammapy/catalog/fermi.py @@ -407,9 +407,14 @@ def _info_spectral_fit(self): ) fmt = "{:<45s} : {:.3f} +- {:.3f}\n" - ss += fmt.format( - "Spectral index", d[tag + "_Index"], d["Unc_" + tag + "_Index"] - ) + if f"{tag}_ExpfactorS" in d: + ss += fmt.format( + "Spectral index", d[tag + "_IndexS"], d["Unc_" + tag + "_IndexS"] + ) + else: + ss += fmt.format( + "Spectral index", d[tag + "_Index"], d["Unc_" + tag + "_Index"] + ) fmt = "{:<45s} : {:.3} +- {:.3} {}\n" ss += fmt.format( diff --git a/gammapy/catalog/tests/data/4fgl-dr4_J0534.5+2200.txt b/gammapy/catalog/tests/data/4fgl-dr4_J0534.5+2200.txt new file mode 100644 index 0000000000..dd4af66152 --- /dev/null +++ b/gammapy/catalog/tests/data/4fgl-dr4_J0534.5+2200.txt @@ -0,0 +1,70 @@ + +*** Basic info *** + +Catalog row index (zero-based) : 1391 +Source name : 4FGL J0534.5+2200 +Extended name : +Associations : PSR J0534+2200, Crab IC field, Crab pulsar, 3FGL J0534.5+2201, 3FHL J0534.5+2201, 2AGL J0534+2205, EGR J0534+2159 +ASSOC_PROB_BAY : 1.000 +ASSOC_PROB_LR : 1.000 +Class1 : PSR +Class2 : +TeVCat flag : P + +*** Other info *** + +Significance (100 MeV - 1 TeV) : 25.186 +Npred : 98594.1 + +Other flags : 784 + +*** Position info *** + +RA : 83.637 deg +DEC : 22.015 deg +GLON : 184.559 deg +GLAT : -5.781 deg + +Semimajor (68%) : 0.0045 deg +Semiminor (68%) : 0.0044 deg +Position angle (68%) : -71.52 deg +Semimajor (95%) : 0.0073 deg +Semiminor (95%) : 0.0072 deg 
+Position angle (95%) : -71.52 deg +ROI number : 270 + +*** Spectral info *** + +Spectrum type : PLSuperExpCutoff +Detection significance (100 MeV - 1 TeV) : 25.186 +Exponential factor : 0.2337 +- 0.0188 +Super-exponential cutoff index : 0.5219 +- 0.0946 +Significance curvature : 27.6 +Pivot energy : 1343 MeV +Spectral index : 2.274 +- 0.008 +Flux Density at pivot energy : 1.1e-10 +- 6.99e-13 cm-2 MeV-1 s-1 +Integral flux (1 - 100 GeV) : 1.54e-07 +- 8.44e-10 cm-2 s-1 +Energy flux (100 MeV - 100 GeV) : 1.46e-09 +- 2.16e-11 erg cm-2 s-1 + +*** Spectral points *** + + e_min e_max flux flux_errn flux_errp e2dnde e2dnde_errn e2dnde_errp is_ul flux_ul e2dnde_ul sqrt_ts + MeV MeV 1 / (s cm2) 1 / (s cm2) 1 / (s cm2) erg / (s cm2) erg / (s cm2) erg / (s cm2) 1 / (s cm2) erg / (s cm2) +---------- ----------- ----------- ----------- ----------- ------------- ------------- ------------- ----- ----------- ------------- ------- + 50.000 100.000 2.365e-06 2.901e-07 2.916e-07 3.800e-10 4.662e-11 4.686e-11 False nan nan 8.309 + 100.000 300.000 1.595e-06 3.088e-08 3.088e-08 3.840e-10 7.436e-12 7.436e-12 False nan nan 60.667 + 300.000 1000.000 5.546e-07 5.472e-09 5.472e-09 3.760e-10 3.710e-12 3.710e-12 False nan nan 138.346 + 1000.000 3000.000 1.255e-07 8.804e-10 8.804e-10 2.918e-10 2.047e-12 2.047e-12 False nan nan 155.573 + 3000.000 10000.000 2.464e-08 8.789e-10 8.789e-10 1.552e-10 5.536e-12 5.536e-12 False nan nan 9.059 + 10000.000 30000.000 2.149e-09 4.120e-10 2.330e-10 4.498e-11 8.622e-12 4.877e-12 False nan nan 3.116 + 30000.000 100000.000 1.542e-16 nan 4.113e-12 8.085e-18 nan 2.157e-13 True 8.227e-12 4.314e-13 0.000 +100000.000 1000000.000 6.125e-18 nan 3.875e-12 5.675e-19 nan 3.590e-13 True 7.750e-12 7.180e-13 0.000 +*** Lightcurve info *** + +Lightcurve measured in the energy band: 100 MeV - 100 GeV + +Variability index : 156.729 +Significance peak (100 MeV - 100 GeV) : 154.788 +Integral flux peak (100 MeV - 100 GeV) : 2.54e-06 +- 2.55e-08 cm^-2 s^-1 +Time peak : 3.18e+08 s (Mission elapsed time) +Peak interval : 3.65e+02 day diff --git a/gammapy/catalog/tests/test_fermi.py b/gammapy/catalog/tests/test_fermi.py index 08f509a831..15d8290ae7 100644 --- a/gammapy/catalog/tests/test_fermi.py +++ b/gammapy/catalog/tests/test_fermi.py @@ -113,6 +113,17 @@ ), ] +SOURCES_4FGL_DR4 = [ + dict( + idx=1391, + name="4FGL J0534.5+2200", + str_ref_file="data/4fgl-dr4_J0534.5+2200.txt", + spec_type=SuperExpCutoffPowerLaw4FGLDR3SpectralModel, + dnde=u.Quantity(1.1048e-07, "cm-2 s-1 GeV-1"), + dnde_err=u.Quantity(6.9934e-10, "cm-2 s-1 GeV-1"), + ) +] + SOURCES_3FGL = [ dict( idx=0, @@ -186,9 +197,10 @@ @requires_data() -def test_4FGL_DR4(): +@pytest.mark.parametrize("ref", SOURCES_4FGL_DR4, ids=lambda _: _["name"]) +def test_4FGL_DR4(ref): cat = SourceCatalog4FGL("$GAMMAPY_DATA/catalogs/fermi/gll_psc_v32.fit.gz") - source = cat["4FGL J0534.5+2200"] + source = cat[ref["name"]] model = source.spectral_model() fp = source.flux_points not_ul = ~fp.is_ul.data.squeeze() @@ -199,6 +211,11 @@ def test_4FGL_DR4(): models = cat.to_models() assert len(models) == len(cat.table) + actual = str(cat[ref["idx"]]) + with open(get_pkg_data_filename(ref["str_ref_file"])) as fh: + expected = fh.read() + assert actual == modify_unit_order_astropy_5_3(expected) + @requires_data() class TestFermi4FGLObject: @@ -363,9 +380,9 @@ def test_lightcurve_dr1(self): assert_allclose(table["flux_errn"][0], 4.437058e-8, rtol=1e-3) def test_lightcurve_dr4(self): - dr2 = SourceCatalog4FGL("$GAMMAPY_DATA/catalogs/fermi/gll_psc_v32.fit.gz") - source_dr2 = 
dr2[self.source_name] - table = source_dr2.lightcurve(interval="1-year").to_table( + dr4 = SourceCatalog4FGL("$GAMMAPY_DATA/catalogs/fermi/gll_psc_v32.fit.gz") + source_dr4 = dr4[self.source_name] + table = source_dr4.lightcurve(interval="1-year").to_table( format="lightcurve", sed_type="flux" ) @@ -379,7 +396,7 @@ def test_lightcurve_dr4(self): assert_allclose(table["flux_errn"][0], 2.298336e-08, rtol=1e-3) with pytest.raises(ValueError): - source_dr2.lightcurve(interval="2-month") + source_dr4.lightcurve(interval="2-month") @requires_data() diff --git a/gammapy/data/data_store.py b/gammapy/data/data_store.py index 545c629c08..4191be1ef1 100644 --- a/gammapy/data/data_store.py +++ b/gammapy/data/data_store.py @@ -197,11 +197,6 @@ def from_events_files(cls, events_paths, irfs_paths=None): - ``CALDB`` (example: ``CALDB = 1dc``) - ``IRF`` (example: ``IRF = South_z20_50h``) - This method is useful specifically if you want to load data simulated - with `ctobssim`_. - - .. _ctobssim: http://cta.irap.omp.eu/ctools/users/reference_manual/ctobssim.html - Parameters ---------- events_paths : list of str or `~pathlib.Path` @@ -732,7 +727,6 @@ def get_hdu_table_rows(self, events_path, irf_path=None): yield dict(HDU_TYPE="bkg", HDU_CLASS="bkg_3d", HDU_NAME="BACKGROUND", **info) -# TODO: load IRF file, and infer HDU_CLASS from IRF file contents! class CalDBIRF: """Helper class to work with IRFs in CALDB format.""" diff --git a/gammapy/data/event_list.py b/gammapy/data/event_list.py index 0b55f477a2..3dab79a8d0 100644 --- a/gammapy/data/event_list.py +++ b/gammapy/data/event_list.py @@ -14,12 +14,10 @@ import matplotlib.pyplot as plt from gammapy.maps import MapAxis, MapCoord, RegionGeom, WcsNDMap from gammapy.maps.axes import UNIT_STRING_FORMAT -from gammapy.utils.deprecation import deprecated from gammapy.utils.fits import earth_location_from_dict from gammapy.utils.scripts import make_path from gammapy.utils.testing import Checker from gammapy.utils.time import time_ref_from_dict -from .gti import GTI from .metadata import EventListMetaData __all__ = ["EventList"] @@ -151,60 +149,6 @@ def to_table_hdu(self, format="gadf"): return fits.BinTableHDU(self.table, name="EVENTS") - @deprecated( - since="v1.2", - message="To write an EventList utilise Observation.write() with include_irfs=False", - ) - def write(self, filename, gti=None, overwrite=False, format="gadf", checksum=False): - """Write the event list to a FITS file. - - If a GTI object is provided, it is saved into - a second extension in the file. - - Parameters - ---------- - filename : `pathlib.Path` or str - Filename. - gti : `~gammapy.data.GTI`, optional - Good Time Intervals object to save to the same file. - Default is None. - overwrite : bool, optional - Overwrite existing file. Default is False. - format : str, optional - FITS format convention. Default is "gadf". - checksum : bool, optional - When True adds both DATASUM and CHECKSUM cards to the headers written to the file. - Default is False. 
- """ - if format != "gadf": - raise ValueError(f"{format} is not a valid EventList format.") - - meta_dict = self.table.meta - - if "HDUCLAS1" in meta_dict.keys() and meta_dict["HDUCLAS1"] != "EVENTS": - raise ValueError("The HDUCLAS1 keyword must be 'EVENTS' for an EventList") - else: - meta_dict["HDUCLAS1"] = "EVENTS" - - if "HDUCLASS" in meta_dict.keys() and meta_dict["HDUCLASS"] != "GADF": - raise ValueError("The HDUCLASS must be 'GADF' for format 'gadf'") - else: - meta_dict["HDUCLASS"] = "GADF" - - filename = make_path(filename) - - primary_hdu = fits.PrimaryHDU() - hdu_evt = self.to_table_hdu(format=format) - hdu_all = fits.HDUList([primary_hdu, hdu_evt]) - - if gti is not None: - if not isinstance(gti, GTI): - raise TypeError("gti must be an instance of GTI") - hdu_gti = gti.to_table_hdu(format=format) - hdu_all.append(hdu_gti) - - hdu_all.writeto(filename, overwrite=overwrite, checksum=checksum) - # TODO: Pass metadata here. Also check that specific meta contents are consistent @classmethod def from_stack(cls, event_lists, **kwargs): diff --git a/gammapy/data/metadata.py b/gammapy/data/metadata.py index 00a7b8b88d..ae2d55f78e 100644 --- a/gammapy/data/metadata.py +++ b/gammapy/data/metadata.py @@ -47,9 +47,9 @@ class ObservationMetaData(MetaData): ---------- obs_info : `~gammapy.utils.ObsInfoMetaData` The general observation information. - pointing : `~gammapy.utils.PointingInfoMetaData + pointing : `~gammapy.utils.PointingInfoMetaData` The pointing metadata. - target : `~gammapy.utils.TargetMetaData + target : `~gammapy.utils.TargetMetaData` The target metadata. creation : `~gammapy.utils.CreatorMetaData` The creation metadata. diff --git a/gammapy/data/observations.py b/gammapy/data/observations.py index fb7ce44229..953c08ba61 100644 --- a/gammapy/data/observations.py +++ b/gammapy/data/observations.py @@ -15,7 +15,7 @@ from astropy.units import Quantity from astropy.utils import lazyproperty import matplotlib.pyplot as plt -from gammapy.utils.deprecation import GammapyDeprecationWarning, deprecated +from gammapy.utils.deprecation import GammapyDeprecationWarning from gammapy.utils.fits import LazyFitsData, earth_location_to_dict from gammapy.utils.metadata import CreatorMetaData, TargetMetaData, TimeInfoMetaData from gammapy.utils.scripts import make_path @@ -39,8 +39,6 @@ class Observation: ---------- obs_id : int, optional Observation id. Default is None - obs_info : dict, optional - Observation info dict. Default is None. aeff : `~gammapy.irf.EffectiveAreaTable2D`, optional Effective area. Default is None. edisp : `~gammapy.irf.EnergyDispersion2D`, optional @@ -77,7 +75,6 @@ def __init__( self, obs_id=None, meta=None, - obs_info=None, gti=None, aeff=None, edisp=None, @@ -89,14 +86,6 @@ def __init__( pointing=None, location=None, ): - if obs_info is not None: - warnings.warn( - "obs_info argument is deprecated since v1.2. Use meta instead.", - GammapyDeprecationWarning, - ) - if meta is None: - meta = ObservationMetaData.from_header(obs_info) - self.obs_id = obs_id self.aeff = aeff self.edisp = edisp @@ -351,15 +340,6 @@ def observation_dead_time_fraction(self): """ return self.meta.deadtime_fraction - @property - def obs_info(self): - """Observation information dictionary.""" - warnings.warn( - "obs_info property is deprecated since v1.2. 
Use meta instead.", - GammapyDeprecationWarning, - ) - return self.meta.to_header() - @property def pointing(self): """Get the pointing for the observation as a `~gammapy.data.FixedPointingInfo` object.""" @@ -387,18 +367,6 @@ def target_radec(self): """Target RA / DEC sky coordinates as a `~astropy.coordinates.SkyCoord` object.""" return self.meta.target.position - @property - @deprecated( - "v1.2", - message="Access additional metadata directly in obs.meta.opt.", - ) - def muoneff(self): - """Observation muon efficiency.""" - if "MUONEFF" in self.meta.optional: - return self.meta.optional["MUONEFF"] - else: - raise KeyError("No muon efficiency information.") - def __str__(self): pointing = self.get_pointing_icrs(self.tmid) ra = pointing.ra.deg @@ -622,7 +590,6 @@ def copy(self, in_memory=False, **kwargs): if in_memory: argnames = inspect.getfullargspec(self.__init__).args # TODO: remove once obs_info is removed from the list of arguments in __init__ - argnames.remove("obs_info") argnames.remove("self") for name in argnames: diff --git a/gammapy/data/tests/test_event_list.py b/gammapy/data/tests/test_event_list.py index 237dcbb348..fce94ee33c 100644 --- a/gammapy/data/tests/test_event_list.py +++ b/gammapy/data/tests/test_event_list.py @@ -3,12 +3,10 @@ from numpy.testing import assert_allclose from astropy import units as u from astropy.coordinates import SkyCoord -from astropy.io import fits from astropy.table import Table from regions import CircleSkyRegion, RectangleSkyRegion from gammapy.data import GTI, EventList, Observation from gammapy.maps import MapAxis, WcsGeom -from gammapy.utils.deprecation import GammapyDeprecationWarning from gammapy.utils.testing import mpl_plot_check, requires_data @@ -62,13 +60,6 @@ def test_write(self): obs = Observation(events=self.events, gti=gti.table) obs.write("test.fits", overwrite=True) - def test_write_checksum(self): - with pytest.raises(GammapyDeprecationWarning): - self.events.write("test.fits", overwrite=True, checksum=True) - hdu = fits.open("test.fits")["EVENTS"] - assert "CHECKSUM" in hdu.header - assert "DATASUM" in hdu.header - @requires_data() class TestEventListHESS: diff --git a/gammapy/datasets/flux_points.py b/gammapy/datasets/flux_points.py index f92bd62288..30e767527a 100644 --- a/gammapy/datasets/flux_points.py +++ b/gammapy/datasets/flux_points.py @@ -40,8 +40,7 @@ def _get_reference_model(model, energy_bounds, margin_percent=70): class FluxPointsDataset(Dataset): - """Bundle a set of flux points with a parametric model, - to compute fit statistic function using chi2 statistics. + """Bundle a set of flux points with a parametric model, to compute fit statistic function using chi2 statistics. For more information see :ref:`datasets`. @@ -60,23 +59,21 @@ class FluxPointsDataset(Dataset): One line per observation for stacked datasets. stat_type : str Method used to compute the statistics: - * chi2 : estimate from chi2 statistics. - * profile : estimate from interpolation of the likelihood profile. - * distrib : Assuming gaussian errors the likelihood is given by the - probability density function of the normal distribution. - For the upper limit case it is necessary to marginalize over the unknown measurement, - So we integrate the normal distribution up to the upper limit value - which gives the complementary error function. - See eq. C7 of Mohanty et al (2013) : - https://iopscience.iop.org/article/10.1088/0004-637X/773/2/168/pdf + + * chi2 : estimate from chi2 statistics. 
+ * profile : estimate from interpolation of the likelihood profile. + * distrib : Assuming gaussian errors the likelihood is given by the probability density function + of the normal distribution. For the upper limit case it is necessary to marginalize over the unknown + measurement, so we integrate the normal distribution up to the upper limit value which gives the + complementary error function. See eq. C7 of `Mohanty et al (2013) `__ Default is `chi2`, in that case upper limits are ignored and the mean of asymetrics error is used. - However it is recommended to use `profile` if `stat_scan` is available on flux points. + However, it is recommended to use `profile` if `stat_scan` is available on flux points. The `distrib` case provides an approximation if the profile is not available. stat_kwargs : dict Extra arguments specifying the interpolation scheme of the likelihood profile. Used only if `stat_type=="profile"`. In that case the default is : - `stat_kwargs={"interp_scale":"sqrt", "extrapolate":True} + `stat_kwargs={"interp_scale":"sqrt", "extrapolate":True}` Examples -------- @@ -113,11 +110,11 @@ class FluxPointsDataset(Dataset): message : Hesse terminated successfully. >>> print(result.parameters.to_table()) - type name value unit ... frozen is_norm link prior - ---- --------- ---------- -------------- ... ------ ------- ---- ----- - index 2.2159e+00 ... False False - amplitude 2.1619e-13 TeV-1 s-1 cm-2 ... False True - reference 1.0000e+00 TeV ... True False + type name value unit ... frozen link prior + ---- --------- ---------- -------------- ... ------ ---- ----- + index 2.2159e+00 ... False + amplitude 2.1619e-13 TeV-1 s-1 cm-2 ... False + reference 1.0000e+00 TeV ... True Note: In order to reproduce the example, you need the tests datasets folder. 
You may download it with the command: @@ -171,7 +168,6 @@ def stat_type(self): @stat_type.setter def stat_type(self, stat_type): - if stat_type not in self.available_stat_type: raise ValueError( f"Invalid stat_type: possible options are {self.available_stat_type}" @@ -592,9 +588,10 @@ def plot_residuals(self, ax=None, method="diff", **kwargs): if method == "diff/model": model = self.flux_pred() - yerr = (yerr[0].quantity / model).squeeze(), ( - yerr[1].quantity / model - ).squeeze() + yerr = ( + (yerr[0].quantity / model).squeeze(), + (yerr[1].quantity / model).squeeze(), + ) elif method == "diff": yerr = yerr[0].quantity.squeeze(), yerr[1].quantity.squeeze() else: diff --git a/gammapy/datasets/map.py b/gammapy/datasets/map.py index adb6211231..d14b1d7de4 100644 --- a/gammapy/datasets/map.py +++ b/gammapy/datasets/map.py @@ -2018,14 +2018,16 @@ def slice_by_idx(self, slices, name=None): >>> sliced = dataset.slice_by_idx(slices) >>> print(sliced.geoms["geom"]) WcsGeom - axes : ['lon', 'lat', 'energy'] - shape : (320, 240, 3) - ndim : 3 - frame : galactic - projection : CAR - center : 0.0 deg, 0.0 deg - width : 8.0 deg x 6.0 deg - wcs ref : 0.0 deg, 0.0 deg + + axes : ['lon', 'lat', 'energy'] + shape : (np.int64(320), np.int64(240), 3) + ndim : 3 + frame : galactic + projection : CAR + center : 0.0 deg, 0.0 deg + width : 8.0 deg x 6.0 deg + wcs ref : 0.0 deg, 0.0 deg + """ name = make_name(name) kwargs = {"gti": self.gti, "name": name, "meta_table": self.meta_table} @@ -2074,7 +2076,7 @@ def slice_by_energy(self, energy_min=None, energy_max=None, name=None): >>> dataset = MapDataset.read("$GAMMAPY_DATA/cta-1dc-gc/cta-1dc-gc.fits.gz") >>> sliced = dataset.slice_by_energy(energy_min="1 TeV", energy_max="5 TeV") >>> sliced.data_shape - (3, 240, 320) + (3, np.int64(240), np.int64(320)) """ name = make_name(name) diff --git a/gammapy/datasets/simulate.py b/gammapy/datasets/simulate.py index 5801ae9654..68a7f37d12 100644 --- a/gammapy/datasets/simulate.py +++ b/gammapy/datasets/simulate.py @@ -206,7 +206,9 @@ def _sample_coord_time_energy(self, dataset, model): coords = npred.sample_coord(n_events=n_events, random_state=self.random_state) - coords["time"] = Time(coords["time"], format="mjd", scale="tt") + coords["time"] = Time( + coords["time"], format="mjd", scale=dataset.gti.time_ref.scale + ) table = self._make_table(coords, dataset.gti.time_ref) diff --git a/gammapy/datasets/tests/test_simulate.py b/gammapy/datasets/tests/test_simulate.py index 27db7b5333..979a425b1d 100644 --- a/gammapy/datasets/tests/test_simulate.py +++ b/gammapy/datasets/tests/test_simulate.py @@ -257,6 +257,31 @@ def test_sample_coord_time_energy(dataset, energy_dependent_temporal_sky_model): rtol=1e-6, ) + irfs = load_irf_dict_from_file( + "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits" + ) + livetime = 1.0 * u.hr + pointing = FixedPointingInfo( + fixed_icrs=SkyCoord(0, 0, unit="deg", frame="galactic").icrs, + ) + obs = Observation.create( + obs_id=1001, + pointing=pointing, + livetime=livetime, + irfs=irfs, + location=LOCATION, + ) + + new_mod = Models( + [ + energy_dependent_temporal_sky_model, + FoVBackgroundModel(dataset_name=dataset.name), + ] + ) + dataset.models = new_mod + events = sampler.run(dataset, obs) + assert dataset.gti.time_ref.scale == events.table.meta["TIMESYS"] + @requires_data() def test_fail_sample_coord_time_energy( @@ -307,11 +332,18 @@ def test_sample_coord_time_energy_random_seed( assert len(events) == 1256 assert_allclose( - [events[0][0], events[0][1], events[0][2], 
events[0][3]], - [0.2998, 1.932196, 266.404988, -28.936178], + [events[0][1], events[0][2], events[0][3]], + [1.932196, 266.404988, -28.936178], rtol=1e-3, ) + # Important: do not increase the tolerance! + assert_allclose( + events[0][0], + 0.29982, + rtol=1.5e-6, + ) + @requires_data() def test_sample_coord_time_energy_unit(dataset, energy_dependent_temporal_sky_model): @@ -326,11 +358,18 @@ def test_sample_coord_time_energy_unit(dataset, energy_dependent_temporal_sky_mo assert len(events) == 1254 assert_allclose( - [events[0][0], events[0][1], events[0][2], events[0][3]], - [854.108591, 6.22904, 266.404988, -28.936178], + [events[0][1], events[0][2], events[0][3]], + [6.22904, 266.404988, -28.936178], rtol=1e-6, ) + # Important: do not increase the tolerance! + assert_allclose( + events[0][0], + 854.10859, + rtol=1.5e-6, + ) + @requires_data() def test_mde_sample_sources(dataset, models): @@ -378,6 +417,9 @@ def test_sample_sources_energy_dependent(dataset, energy_dependent_temporal_sky_ assert_allclose(events.table["TIME"][0], 95.464699, rtol=1e-5) + dt = np.max(events.table["TIME"]) - np.min(events.table["TIME"]) + assert dt <= dataset.gti.time_sum.to("s").value + sampler.t_delta.to("s").value + @requires_data() def test_mde_sample_weak_src(dataset, models): diff --git a/gammapy/datasets/utils.py b/gammapy/datasets/utils.py index 6316b93785..5270db5322 100644 --- a/gammapy/datasets/utils.py +++ b/gammapy/datasets/utils.py @@ -48,10 +48,11 @@ def apply_edisp(input_map, edisp): geom : WcsGeom axes : ['lon', 'lat', 'energy'] - shape : (50, 50, 3) + shape : (np.int64(50), np.int64(50), 3) ndim : 3 unit : dtype : float64 + """ # TODO: either use sparse matrix multiplication or something like edisp.is_diagonal if edisp is not None: diff --git a/gammapy/estimators/energydependentmorphology.py b/gammapy/estimators/energydependentmorphology.py index 52a51f88d7..3221603219 100644 --- a/gammapy/estimators/energydependentmorphology.py +++ b/gammapy/estimators/energydependentmorphology.py @@ -1,5 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst """Implementation of energy-dependent morphology estimator tool.""" + import numpy as np from gammapy.datasets import Datasets from gammapy.modeling import Fit @@ -47,7 +48,6 @@ def weighted_chi2_parameter(results_edep, parameters=["sigma"]): are independent, which cannot be guaranteed in this use case. """ - chi2_value = [] df = [] for parameter in parameters: @@ -91,7 +91,6 @@ class EnergyDependentMorphologyEstimator(Estimator): tag = "EnergyDependentMorphologyEstimator" def __init__(self, energy_edges, source=0, fit=None): - self.energy_edges = energy_edges self.source = source self.num_energy_bands = len(self.energy_edges) - 1 @@ -301,7 +300,6 @@ def run(self, datasets): results : `dict` Dictionary with the various energy-dependence estimation values. 
""" - if not isinstance(datasets, Datasets) or datasets.is_all_same_type is False: raise ValueError("Unsupported datasets type.") diff --git a/gammapy/estimators/flux.py b/gammapy/estimators/flux.py index e4c7a26620..a056b4065f 100644 --- a/gammapy/estimators/flux.py +++ b/gammapy/estimators/flux.py @@ -7,7 +7,6 @@ from gammapy.estimators.utils import _get_default_norm from gammapy.maps import Map, MapAxis from gammapy.modeling.models import ScaleSpectralModel -from gammapy.utils.deprecation import deprecated_attribute, deprecated_renamed_argument log = logging.getLogger(__name__) @@ -28,14 +27,6 @@ class FluxEstimator(ParameterEstimator): ---------- source : str or int For which source in the model to compute the flux. - norm_min : float - Minimum value for the norm used for the fit statistic profile evaluation. - norm_max : float - Maximum value for the norm used for the fit statistic profile evaluation. - norm_n_values : int - Number of norm values used for the fit statistic profile. - norm_values : `numpy.ndarray` - Array of norm values to be used for the fit statistic profile. n_sigma : int Sigma to use for asymmetric error computation. n_sigma_ul : int @@ -58,12 +49,11 @@ class FluxEstimator(ParameterEstimator): If False only the norm of the source of interest if fitted, and all other parameters are frozen at their current values. Default is False. - norm : ~gammapy.modeling.Parameter` - norm : ~gammapy.modeling.Parameter` or dict - Norm parameter used for the fit + norm : `~gammapy.modeling.Parameter` or dict + Norm parameter used for the fit. Default is None and a new parameter is created automatically, with value=1, name="norm", scan_min=0.2, scan_max=5, and scan_n_values = 11. - By default the min and max are not set and derived from the source model, + By default, the min and max are not set and derived from the source model, unless the source model does not have one and only one norm parameter. If a dict is given the entries should be a subset of `~gammapy.modeling.Parameter` arguments. @@ -71,23 +61,9 @@ class FluxEstimator(ParameterEstimator): tag = "FluxEstimator" - norm_min = deprecated_attribute("norm_min", "1.2") - norm_max = deprecated_attribute("norm_max", "1.2") - norm_n_values = deprecated_attribute("norm_n_values", "1.2") - norm_values = deprecated_attribute("norm_values", "1.2") - - @deprecated_renamed_argument( - ["norm_min", "norm_max", "norm_n_values", "norm_values"], - [None, None, None, None], - ["1.2", "1.2", "1.2", "1.2"], - ) def __init__( self, source=0, - norm_min=0.2, - norm_max=5, - norm_n_values=11, - norm_values=None, n_sigma=1, n_sigma_ul=2, selection_optional=None, @@ -95,17 +71,9 @@ def __init__( reoptimize=False, norm=None, ): - self.source = source - self.norm = _get_default_norm( - norm, - scan_min=norm_min, - scan_max=norm_max, - scan_n_values=norm_n_values, - scan_values=norm_values, - interp="log", - ) + self.norm = _get_default_norm(norm, interp="log") super().__init__( null_value=0, @@ -116,16 +84,6 @@ def __init__( reoptimize=reoptimize, ) - def _set_norm_parameter(self, norm=None, scaled_parameter=None): - """Define properties of the norm spectral parameter.""" - - if np.isnan(self.norm.min): - norm.min = scaled_parameter.min / scaled_parameter.value - if np.isnan(self.norm.max): - norm.max = scaled_parameter.max / scaled_parameter.value - norm.interp = scaled_parameter.interp - return norm - def get_scale_model(self, models): """Set scale model. 
@@ -141,14 +99,6 @@ def get_scale_model(self, models): """ ref_model = models[self.source].spectral_model scale_model = ScaleSpectralModel(ref_model) - - norms = ref_model.parameters.norm_parameters - - if len(norms.free_parameters) == 1: - self.norm = self._set_norm_parameter( - self.norm.copy(), norms.free_parameters[0] - ) - scale_model.norm = self.norm.copy() return scale_model diff --git a/gammapy/estimators/map/asmooth.py b/gammapy/estimators/map/asmooth.py index bd4619e8db..9555e2df3f 100644 --- a/gammapy/estimators/map/asmooth.py +++ b/gammapy/estimators/map/asmooth.py @@ -1,5 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst """Implementation of adaptive smoothing algorithms.""" + import numpy as np from astropy.convolution import Gaussian2DKernel, Tophat2DKernel from astropy.coordinates import Angle @@ -11,6 +12,7 @@ from gammapy.utils.pbar import progress_bar from ..core import Estimator from ..utils import estimate_exposure_reco_energy +from gammapy.utils.deprecation import deprecated_renamed_argument __all__ = ["ASmoothMapEstimator"] @@ -36,8 +38,8 @@ class ASmoothMapEstimator(Estimator): Smoothing scales. kernel : `astropy.convolution.Kernel` Smoothing kernel. - spectrum : `SpectralModel` - Spectral model assumption. + spectral_model : `~gammapy.modeling.models.SpectralModel`, optional + Spectral model assumption. Default is power-law with spectral index of 2. method : {'lima', 'asmooth'} Significance estimation method. Default is 'lima'. threshold : float @@ -62,19 +64,20 @@ class ASmoothMapEstimator(Estimator): tag = "ASmoothMapEstimator" + @deprecated_renamed_argument("spectrum", "spectral_model", "v1.3") def __init__( self, scales=None, kernel=Gaussian2DKernel, - spectrum=None, + spectral_model=None, method="lima", threshold=5, energy_edges=None, ): - if spectrum is None: - spectrum = PowerLawSpectralModel() + if spectral_model is None: + spectral_model = PowerLawSpectralModel(index=2) - self.spectrum = spectrum + self.spectral_model = spectral_model if scales is None: scales = self.get_scales(n_scales=9, kernel=kernel) @@ -230,7 +233,7 @@ def estimate_maps(self, dataset): background = dataset_image.background.data[0] if dataset_image.exposure is not None: - exposure = estimate_exposure_reco_energy(dataset_image, self.spectrum) + exposure = estimate_exposure_reco_energy(dataset_image, self.spectral_model) else: exposure = None diff --git a/gammapy/estimators/map/core.py b/gammapy/estimators/map/core.py index 98133c22cc..29ae80f48d 100644 --- a/gammapy/estimators/map/core.py +++ b/gammapy/estimators/map/core.py @@ -186,7 +186,7 @@ def filter_success_nan(self, value): @property def available_quantities(self): - """Available quantities""" + """Available quantities.""" return list(self._data.keys()) @staticmethod @@ -343,7 +343,7 @@ def reference_model(self): @property def reference_spectral_model(self): - """Reference spectral model as a `SpectralModel`""" + """Reference spectral model as a `SpectralModel`.""" return self.reference_model.spectral_model @property @@ -524,7 +524,7 @@ def ts(self): @property def ts_scan(self): - """Test statistic scan as a `~gammapy.maps.Map` object""" + """Test statistic scan as a `~gammapy.maps.Map` object.""" return self.stat_scan - np.expand_dims(self.stat.data, 2) # TODO: always derive sqrt(TS) from TS? 
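The TODO above concerns deriving sqrt(TS) from TS. For reference, a minimal standalone sketch of the usual signed-square-root convention (an illustration only, not gammapy's exact implementation, which may treat edge cases differently):

```python
import numpy as np


def sqrt_ts(ts, norm):
    """Signed significance: sqrt(TS) carrying the sign of the fitted norm.

    A sketch of the common convention; negative norms (deficits relative
    to the reference model) map to negative significance.
    """
    return np.sign(norm) * np.sqrt(np.clip(ts, 0, None))


print(sqrt_ts(ts=25.0, norm=1.2))   # 5.0
print(sqrt_ts(ts=25.0, norm=-0.3))  # -5.0
```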
@@ -609,7 +609,7 @@ def acceptance_on(self): @property def acceptance_off(self): - """The acceptance in the off region""" + """The acceptance in the off region.""" self._check_quantity("acceptance_off") return self._data["acceptance_off"] @@ -893,7 +893,6 @@ def iter_by_axis(self, axis_name): FluxMap iteration. """ - split_maps = {} axis = self.geom.axes[axis_name] gti = self.gti @@ -1129,22 +1128,21 @@ def read(cls, filename, checksum=False): return cls.from_hdulist(hdulist, checksum=checksum) def copy(self, reference_model=None): - """Deep copy + """Deep copy. Parameters ---------- - reference_model : `~gammapy.modeling.models.SkyModel`, optional - The reference model to use for conversions. If None, the originial model is copied. + The reference model to use for conversions. If None, the original model is copied. Flux maps have been obtained for a specific reference model. Changing it will change the fluxes. Handle with care. + Returns ------- flux_maps : `~gammapy.estimators.FluxMaps` Copied flux maps object. """ - new = deepcopy(self) if reference_model is not None: new._reference_model = reference_model.copy() @@ -1177,7 +1175,6 @@ def slice_by_idx(self, slices): >>> slices = {"energy": slice(0, 2)} >>> sliced = fp.slice_by_idx(slices) """ - data = {} for key, item in self._data.items(): @@ -1191,7 +1188,7 @@ def slice_by_idx(self, slices): ) def slice_by_coord(self, slices): - """Slice flux maps by coordinate values + """Slice flux maps by coordinate values. Parameters ---------- @@ -1215,7 +1212,6 @@ def slice_by_coord(self, slices): >>> slices = {"time": slice(2035.93*u.day, 2036.05*u.day)} >>> sliced = lc_1d.slice_by_coord(slices) """ - idx_intervals = [] for key, interval in zip(slices.keys(), slices.values()): @@ -1252,7 +1248,6 @@ def slice_by_time(self, time_min, time_max): >>> lc_1d = FluxPoints.read("$GAMMAPY_DATA/estimators/pks2155_hess_lc/pks2155_hess_lc.fits") >>> sliced = lc_1d.slice_by_time(time_min=2035.93*u.day, time_max=2036.05*u.day) """ - time_slice = slice(time_min, time_max) return self.slice_by_coord({"time": time_slice}) @@ -1277,7 +1272,6 @@ def slice_by_energy(self, energy_min, energy_max): >>> fp = FluxPoints.read("$GAMMAPY_DATA/estimators/crab_hess_fp/crab_hess_fp.fits") >>> sliced = fp.slice_by_energy(energy_min=2*u.TeV, energy_max=10*u.TeV) """ - energy_slice = slice(energy_min, energy_max) return self.slice_by_coord({"energy": energy_slice}) diff --git a/gammapy/estimators/map/excess.py b/gammapy/estimators/map/excess.py index b608ee9cc6..31d7075dc8 100644 --- a/gammapy/estimators/map/excess.py +++ b/gammapy/estimators/map/excess.py @@ -103,7 +103,6 @@ def convolved_map_dataset_counts_statistics(convolved_maps, stat_type): counts_statistic : `~gammapy.stats.CashCountsStatistic` or `~gammapy.stats.WStatCountsStatistic` The counts statistic. """ - if stat_type == "wstat": n_on_conv = convolved_maps["n_on_conv"] n_off = convolved_maps["n_off"] @@ -196,7 +195,7 @@ class ExcessMapEstimator(Estimator): geom : WcsGeom axes : ['lon', 'lat', 'energy'] - shape : (320, 240, 1) + shape : (np.int64(320), np.int64(240), 1) quantities : ['npred', 'npred_excess', 'counts', 'ts', 'sqrt_ts', 'norm', 'norm_err'] ref. model : pl n_sigma : 1 @@ -258,7 +257,7 @@ def correlation_radius(self, correlation_radius): self._correlation_radius = Angle(correlation_radius) def run(self, dataset): - """Compute correlated excess, Li & Ma significance and flux maps + """Compute correlated excess, Li & Ma significance and flux maps. 
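For context, the estimator touched here can be run end to end as in its class docstring; a sketch using the same dataset that yields the ``(320, 240, 1)`` geometry shown in the repr above (assumes ``$GAMMAPY_DATA`` is set)::

    from gammapy.datasets import MapDataset
    from gammapy.estimators import ExcessMapEstimator

    dataset = MapDataset.read("$GAMMAPY_DATA/cta-1dc-gc/cta-1dc-gc.fits.gz")
    estimator = ExcessMapEstimator(correlation_radius="0.1 deg")
    maps = estimator.run(dataset)
    print(maps.available_quantities)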
If a model is set on the dataset the excess map estimator will compute the excess taking into account the predicted counts of the model. @@ -318,7 +317,6 @@ def estimate_kernel(self, dataset): kernel : `~astropy.convolution.Tophat2DKernel` Kernel. """ - pixel_size = np.mean(np.abs(dataset.counts.geom.wcs.wcs.cdelt)) size = self.correlation_radius.deg / pixel_size kernel = Tophat2DKernel(size) @@ -350,8 +348,7 @@ def estimate_mask_default(dataset): return mask def estimate_exposure_reco_energy(self, dataset, kernel, mask, reco_exposure): - """Estimate exposure map in reconstructed energy for a single dataset - assuming the given spectral_model shape. + """Estimate exposure map in reconstructed energy for a single dataset assuming the given spectral_model shape. Parameters ---------- @@ -387,7 +384,6 @@ def estimate_excess_map(self, dataset, reco_exposure): dataset : `~gammapy.datasets.MapDataset` Map dataset. """ - kernel = self.estimate_kernel(dataset) geom = dataset.counts.geom mask = self.estimate_mask_default(dataset) diff --git a/gammapy/estimators/map/tests/test_asmooth.py b/gammapy/estimators/map/tests/test_asmooth.py index 7613f19105..90920215f3 100644 --- a/gammapy/estimators/map/tests/test_asmooth.py +++ b/gammapy/estimators/map/tests/test_asmooth.py @@ -7,6 +7,8 @@ from gammapy.estimators import ASmoothMapEstimator from gammapy.maps import Map, MapAxis, WcsNDMap from gammapy.utils.testing import requires_data +from gammapy.modeling.models import PowerLawSpectralModel +from gammapy.utils.deprecation import GammapyDeprecationWarning @pytest.fixture(scope="session") @@ -39,6 +41,9 @@ def test_asmooth(input_dataset_simple): kernel = Tophat2DKernel scales = ASmoothMapEstimator.get_scales(3, factor=2, kernel=kernel) * 0.1 * u.deg + with pytest.warns(GammapyDeprecationWarning): + ASmoothMapEstimator(scales=scales, spectrum=PowerLawSpectralModel()) + asmooth = ASmoothMapEstimator( scales=scales, kernel=kernel, method="lima", threshold=2.5 ) diff --git a/gammapy/estimators/map/tests/test_ts.py b/gammapy/estimators/map/tests/test_ts.py index 674cabf6ea..16235b23f0 100644 --- a/gammapy/estimators/map/tests/test_ts.py +++ b/gammapy/estimators/map/tests/test_ts.py @@ -440,6 +440,32 @@ def test_ts_map_stat_scan(fake_dataset): combine_flux_maps([maps, maps1], method="test") +def test_ts_map_stat_scan_different_energy(fake_dataset): + model = fake_dataset.models["source"] + + dataset = fake_dataset.downsample(25) + + estimator = TSMapEstimator( + model, + kernel_width="0.3 deg", + energy_edges=[200, 3500] * u.GeV, + selection_optional=["stat_scan"], + ) + + estimator_1 = TSMapEstimator( + model, + kernel_width="0.3 deg", + selection_optional=["stat_scan"], + energy_edges=[0.2, 10] * u.TeV, + ) + + maps = estimator.run(dataset) + maps_1 = estimator_1.run(dataset) + + combined_map = combine_flux_maps([maps_1, maps], method="profile") + assert combined_map.ts.data.shape == (1, 2, 2) + + def test_ts_map_with_model(fake_dataset): model = fake_dataset.models["source"] fake_dataset = fake_dataset.copy() diff --git a/gammapy/estimators/map/ts.py b/gammapy/estimators/map/ts.py index 2a672623c9..bcf5491f87 100644 --- a/gammapy/estimators/map/ts.py +++ b/gammapy/estimators/map/ts.py @@ -65,7 +65,7 @@ class TSMapEstimator(Estimator, parallel.ParallelMixin): Parameters ---------- - model : `~gammapy.modeling.model.SkyModel` + model : `~gammapy.modeling.models.SkyModel` Source model kernel. If set to None, assume spatail model: point source model, PointSpatialModel. 
spectral model: PowerLawSpectral Model of index 2 @@ -155,7 +155,7 @@ class TSMapEstimator(Estimator, parallel.ParallelMixin): geom : WcsGeom axes : ['lon', 'lat', 'energy'] - shape : (400, 200, 1) + shape : (np.int64(400), np.int64(200), 1) quantities : ['ts', 'norm', 'niter', 'norm_err', 'npred', 'npred_excess', 'stat', 'stat_null', 'success'] ref. model : pl n_sigma : 1 @@ -471,7 +471,6 @@ def estimate_flux_map(self, datasets): dataset : `~gammapy.datasets.Datasets` or `~gammapy.datasets.MapDataset` Map dataset or Datasets (list of MapDataset with the same spatial geometry). """ - maps = [self.estimate_fit_input_maps(dataset=d) for d in datasets] mask = np.sum([_["mask"].data for _ in maps], axis=0).astype(bool) @@ -663,7 +662,7 @@ def __init__(self, model, counts, background, norm_guess): @lazyproperty def norm_bounds(self): - """Bounds for x""" + """Bounds for x.""" return norm_bounds_cython(self.counts, self.background, self.model) def npred(self, norm): @@ -874,7 +873,6 @@ def estimate_scan(self, dataset, result): result : dict Result dictionary including 'stat_scan'. """ - sparse_norms = _get_norm_scan_values(self.norm, result) scale = sparse_norms[None, :] @@ -1016,7 +1014,6 @@ def _ts_value( TS : float Test statistic value at the given pixel position. """ - datasets = [] nd = len(counts) for idx in range(nd): diff --git a/gammapy/estimators/points/core.py b/gammapy/estimators/points/core.py index 9de5b269a4..7b5b378756 100644 --- a/gammapy/estimators/points/core.py +++ b/gammapy/estimators/points/core.py @@ -29,13 +29,13 @@ def squash_fluxpoints(flux_point, axis): - """Squash a FluxPoints object into one point. + """Squash a `~FluxPoints` object into one point. + The log-likelihoods profiles in each bin are summed to compute the resultant quantities. Stat profiles - must be present on the fluxpoints object for + must be present on the `~FluxPoints` object for this method to work. """ - value_scan = flux_point.stat_scan.geom.axes["norm"].center stat_scan = np.sum(flux_point.stat_scan.data, axis=0).ravel() f = interp1d(value_scan, stat_scan, kind="quadratic", bounds_error=False) @@ -181,6 +181,7 @@ def read( format=None, reference_model=None, checksum=False, + table_format="ascii.ecsv", **kwargs, ): """Read precomputed flux points. @@ -198,6 +199,8 @@ def read( Reference spectral model. checksum : bool If True checks both DATASUM and CHECKSUM cards in the file headers. Default is False. + table_format : str + Format string for the ~astropy.Table object. Default is "ascii.ecsv" **kwargs : dict, optional Keyword arguments passed to `astropy.table.Table.read`. @@ -208,9 +211,8 @@ def read( """ filename = make_path(filename) gti = None - kwargs.setdefault("format", "ascii.ecsv") try: - table = Table.read(filename, **kwargs) + table = Table.read(filename, format=table_format, **kwargs) except (IORegistryError, UnicodeDecodeError): with fits.open(filename, checksum=checksum) as hdulist: if "FLUXPOINTS" in hdulist: @@ -315,8 +317,7 @@ def _table_guess_format(table): def from_table( cls, table, sed_type=None, format=None, reference_model=None, gti=None ): - """Create flux points from a table. The table column names must be consistent with the - sed_type. + """Create flux points from a table. The table column names must be consistent with the sed_type. Parameters ---------- @@ -434,7 +435,6 @@ def to_table(self, sed_type=None, format=None, formatted=False): If None, the format will be guessed by looking at the axes that are present in the object. Default is None. 
- formatted : bool Formatted version with column formats applied. Numerical columns are formatted to .3f and .3e respectively. Default is False. @@ -446,7 +446,6 @@ def to_table(self, sed_type=None, format=None, formatted=False): Examples -------- - This is how to read and plot example flux points: >>> from gammapy.estimators import FluxPoints @@ -592,7 +591,7 @@ def _energy_ref_lafferty(model, energy_min, energy_max): return model.inverse(dnde_mean) def _plot_get_flux_err(self, sed_type=None): - """Compute flux error for given SED type""" + """Compute flux error for given SED type.""" y_errn, y_errp = None, None if "norm_err" in self.available_quantities: @@ -772,6 +771,7 @@ def plot_ts_profiles( def recompute_ul(self, n_sigma_ul=2, **kwargs): """Recompute upper limits corresponding to the given value. + The pre-computed statistic profiles must exist for the re-computation. Parameters @@ -797,7 +797,6 @@ def recompute_ul(self, n_sigma_ul=2, **kwargs): >>> print(flux_points_recomputed.meta["n_sigma_ul"], flux_points_recomputed.flux_ul.data[0]) 3 [[6.22245374e-09]] """ - if not self.has_stat_profiles: raise ValueError( "Stat profiles not present. Upper limit computation is not possible" @@ -821,12 +820,13 @@ def recompute_ul(self, n_sigma_ul=2, **kwargs): def resample_axis(self, axis_new): """Rebin the flux point object along the new axis. + The log-likelihoods profiles in each bin are summed to compute the resultant quantities. - Stat profiles must be present on the fluxpoints object for + Stat profiles must be present on the `~gammapy.estimators.FluxPoints` object for this method to work. - For now, works only for flat fluxpoints. + For now, works only for flat `~gammapy.estimators.FluxPoints`. Parameters ---------- @@ -838,7 +838,6 @@ def resample_axis(self, axis_new): flux_points : `~gammapy.estimators.FluxPoints` A new FluxPoints object with modified axis. """ - if not self.has_stat_profiles: raise ValueError("Stat profiles not present, rebinning is not possible") diff --git a/gammapy/estimators/points/lightcurve.py b/gammapy/estimators/points/lightcurve.py index 07be9513c0..e887e75ef0 100644 --- a/gammapy/estimators/points/lightcurve.py +++ b/gammapy/estimators/points/lightcurve.py @@ -8,7 +8,6 @@ from gammapy.datasets import Datasets from gammapy.datasets.actors import DatasetsActor from gammapy.maps import LabelMapAxis, Map, TimeMapAxis -from gammapy.utils.deprecation import deprecated_attribute from gammapy.utils.pbar import progress_bar from .core import FluxPoints from .sed import FluxPointsEstimator @@ -43,14 +42,6 @@ class LightCurveEstimator(FluxPointsEstimator): For which source in the model to compute the flux points. Default is 0. atol : `~astropy.units.Quantity` Tolerance value for time comparison with different scale. Default 1e-6 sec. - norm_min : float - Minimum value for the norm used for the fit statistic profile evaluation. - norm_max : float - Maximum value for the norm used for the fit statistic profile evaluation. - norm_n_values : int - Number of norm values used for the fit statistic profile. - norm_values : `numpy.ndarray` - Array of norm values to be used for the fit statistic profile. n_sigma : int Number of sigma to use for asymmetric error computation. Default is 1. 
n_sigma_ul : int @@ -100,11 +91,6 @@ class LightCurveEstimator(FluxPointsEstimator): tag = "LightCurveEstimator" - norm_min = deprecated_attribute("norm_min", "1.2") - norm_max = deprecated_attribute("norm_max", "1.2") - norm_n_values = deprecated_attribute("norm_n_values", "1.2") - norm_values = deprecated_attribute("norm_values", "1.2") - def __init__(self, time_intervals=None, atol="1e-6 s", **kwargs): self.time_intervals = time_intervals self.atol = u.Quantity(atol) @@ -225,7 +211,6 @@ def estimate_time_bin_flux(self, datasets, dataset_names=None): result : `FluxPoints` Resulting flux points. """ - estimator = self.copy() estimator.n_jobs = self._n_child_jobs fp = estimator._run_flux_points(datasets) diff --git a/gammapy/estimators/points/profile.py b/gammapy/estimators/points/profile.py index 9f2e46e387..77a04870c6 100644 --- a/gammapy/estimators/points/profile.py +++ b/gammapy/estimators/points/profile.py @@ -1,5 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst """Tools to create profiles (i.e. 1D "slices" from 2D images).""" + from itertools import repeat import numpy as np from astropy import units as u @@ -10,6 +11,7 @@ from gammapy.modeling.models import PowerLawSpectralModel, SkyModel from .core import FluxPoints from .sed import FluxPointsEstimator +from gammapy.utils.deprecation import deprecated_renamed_argument __all__ = ["FluxProfileEstimator"] @@ -21,7 +23,7 @@ class FluxProfileEstimator(FluxPointsEstimator): ---------- regions : list of `~regions.SkyRegion` Regions to use. - spectrum : `~gammapy.modeling.models.SpectralModel` (optional) + spectral_model : `~gammapy.modeling.models.SpectralModel`, optional Spectral model to compute the fluxes or brightness. Default is power-law with spectral index of 2. n_jobs : int, optional @@ -88,7 +90,8 @@ class FluxProfileEstimator(FluxPointsEstimator): tag = "FluxProfileEstimator" - def __init__(self, regions, spectrum=None, **kwargs): + @deprecated_renamed_argument("spectrum", "spectral_model", "v1.3") + def __init__(self, regions, spectral_model=None, **kwargs): if len(regions) <= 1: raise ValueError( "Please provide at least two regions for flux profile estimation." @@ -96,10 +99,10 @@ def __init__(self, regions, spectrum=None, **kwargs): self.regions = regions - if spectrum is None: - spectrum = PowerLawSpectralModel() + if spectral_model is None: + spectral_model = PowerLawSpectralModel() - self.spectrum = spectrum + self.spectral_model = spectral_model super().__init__(**kwargs) @property @@ -143,7 +146,6 @@ def run(self, datasets): profile : `~gammapy.estimators.FluxPoints` Profile flux points. 
""" - datasets = Datasets(datasets=datasets) maps = parallel.run_multiprocessing( @@ -170,7 +172,7 @@ def _run_region(self, datasets, region): .to_region_nd_map(region, func=np.sum, weights=dataset_map.mask_safe) .data ) - datasets_to_fit.models = SkyModel(self.spectrum, name="test-source") + datasets_to_fit.models = SkyModel(self.spectral_model, name="test-source") estimator = self.copy() estimator.n_jobs = self._n_child_jobs return estimator._run_flux_points(datasets_to_fit) diff --git a/gammapy/estimators/points/sed.py b/gammapy/estimators/points/sed.py index b846c958d6..fc56b03adb 100644 --- a/gammapy/estimators/points/sed.py +++ b/gammapy/estimators/points/sed.py @@ -10,7 +10,6 @@ from gammapy.datasets.flux_points import _get_reference_model from gammapy.maps import MapAxis from gammapy.modeling import Fit -from gammapy.utils.deprecation import deprecated_attribute from ..flux import FluxEstimator from .core import FluxPoints @@ -28,26 +27,16 @@ class FluxPointsEstimator(FluxEstimator, parallel.ParallelMixin): fitted within the energy range defined by the energy group. This is done for each group independently. The amplitude is re-normalized using the "norm" parameter, which specifies the deviation of the flux from the reference model in this - energy group. See https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/binned_likelihoods/index.html # noqa: E501 + energy group. See https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/binned_likelihoods/index.html for details. - The method is also described in the Fermi-LAT catalog paper - https://ui.adsabs.harvard.edu/abs/2015ApJS..218...23A - or the HESS Galactic Plane Survey paper - https://ui.adsabs.harvard.edu/abs/2018A%26A...612A...1H + The method is also described in the `Fermi-LAT catalog paper `__ + or the `H.E.S.S. Galactic Plane Survey paper `__ Parameters ---------- source : str or int For which source in the model to compute the flux points. - norm_min : float - Minimum value for the norm used for the fit statistic profile evaluation. - norm_max : float - Maximum value for the norm used for the fit statistic profile evaluation. - norm_n_values : int - Number of norm values used for the fit statistic profile. - norm_values : `numpy.ndarray` - Array of norm values to be used for the fit statistic profile. n_sigma : int Number of sigma to use for asymmetric error computation. Default is 1. n_sigma_ul : int @@ -94,11 +83,6 @@ class FluxPointsEstimator(FluxEstimator, parallel.ParallelMixin): tag = "FluxPointsEstimator" - norm_min = deprecated_attribute("norm_min", "1.2") - norm_max = deprecated_attribute("norm_max", "1.2") - norm_n_values = deprecated_attribute("norm_n_values", "1.2") - norm_values = deprecated_attribute("norm_values", "1.2") - def __init__( self, energy_edges=[1, 10] * u.TeV, @@ -129,7 +113,6 @@ def run(self, datasets): flux_points : `FluxPoints` Estimated flux points. 
""" - if not isinstance(datasets, DatasetsActor): datasets = Datasets(datasets=datasets) @@ -214,7 +197,7 @@ def estimate_flux_point(self, datasets, energy_min, energy_max): return self._nan_result(datasets, model, energy_min, energy_max) def _nan_result(self, datasets, model, energy_min, energy_max): - """NaN result""" + """NaN result.""" energy_axis = MapAxis.from_energy_edges([energy_min, energy_max]) with np.errstate(invalid="ignore", divide="ignore"): diff --git a/gammapy/estimators/points/sensitivity.py b/gammapy/estimators/points/sensitivity.py index 3c2730f592..3aa5be991d 100644 --- a/gammapy/estimators/points/sensitivity.py +++ b/gammapy/estimators/points/sensitivity.py @@ -5,6 +5,7 @@ from gammapy.modeling.models import PowerLawSpectralModel, SkyModel from gammapy.stats import WStatCountsStatistic from ..core import Estimator +from gammapy.utils.deprecation import deprecated_renamed_argument __all__ = ["SensitivityEstimator"] @@ -20,8 +21,8 @@ class SensitivityEstimator(Estimator): Parameters ---------- - spectrum : `SpectralModel` - Spectral model assumption. Default is Power Law with index 2. + spectral_model : `~gammapy.modeling.models.SpectralModel`, optional + Spectral model assumption. Default is power-law with spectral index of 2. n_sigma : float, optional Minimum significance. Default is 5. gamma_min : float, optional @@ -37,18 +38,20 @@ class SensitivityEstimator(Estimator): tag = "SensitivityEstimator" + @deprecated_renamed_argument("spectrum", "spectral_model", "v1.3") def __init__( self, - spectrum=None, + spectral_model=None, n_sigma=5.0, gamma_min=10, bkg_syst_fraction=0.05, ): + if spectral_model is None: + spectral_model = PowerLawSpectralModel( + index=2, amplitude="1 cm-2 s-1 TeV-1" + ) - if spectrum is None: - spectrum = PowerLawSpectralModel(index=2, amplitude="1 cm-2 s-1 TeV-1") - - self.spectrum = spectrum + self.spectral_model = spectral_model self.n_sigma = n_sigma self.gamma_min = gamma_min self.bkg_syst_fraction = bkg_syst_fraction @@ -100,12 +103,12 @@ def estimate_min_e2dnde(self, excess, dataset): """ energy = dataset._geom.axes["energy"].center - dataset.models = SkyModel(spectral_model=self.spectrum) + dataset.models = SkyModel(spectral_model=self.spectral_model) npred = dataset.npred_signal() phi_0 = excess / npred - dnde_model = self.spectrum(energy=energy) + dnde_model = self.spectral_model(energy=energy) dnde = phi_0.data[:, 0, 0] * dnde_model * energy**2 return dnde.to("erg / (cm2 s)") diff --git a/gammapy/estimators/points/tests/test_profile.py b/gammapy/estimators/points/tests/test_profile.py index 8b9eaae10d..405805e48a 100644 --- a/gammapy/estimators/points/tests/test_profile.py +++ b/gammapy/estimators/points/tests/test_profile.py @@ -17,6 +17,7 @@ make_orthogonal_rectangle_sky_regions, ) from gammapy.utils.testing import requires_data, requires_dependency +from gammapy.utils.deprecation import GammapyDeprecationWarning def get_simple_dataset_on_off(): @@ -54,6 +55,13 @@ def test_profile_content(): wcs = mapdataset_onoff.counts.geom.wcs boxes = make_horizontal_boxes(wcs) + with pytest.warns(GammapyDeprecationWarning): + FluxProfileEstimator( + regions=boxes, + energy_edges=[0.1, 1, 10] * u.TeV, + spectrum=PowerLawSpectralModel(), + ) + prof_maker = FluxProfileEstimator( regions=boxes, energy_edges=[0.1, 1, 10] * u.TeV, @@ -61,6 +69,7 @@ def test_profile_content(): n_sigma=1, n_sigma_ul=3, ) + result = prof_maker.run(mapdataset_onoff) imp_prof = result.to_table(sed_type="flux") diff --git 
a/gammapy/estimators/points/tests/test_sensitivity.py b/gammapy/estimators/points/tests/test_sensitivity.py index 926b2bf18b..bc3fecb993 100644 --- a/gammapy/estimators/points/tests/test_sensitivity.py +++ b/gammapy/estimators/points/tests/test_sensitivity.py @@ -5,6 +5,8 @@ from gammapy.estimators import FluxPoints, SensitivityEstimator from gammapy.irf import EDispKernelMap from gammapy.maps import MapAxis, RegionNDMap +from gammapy.modeling.models import PowerLawSpectralModel +from gammapy.utils.deprecation import GammapyDeprecationWarning @pytest.fixture() @@ -37,6 +39,10 @@ def test_cta_sensitivity_estimator(spectrum_dataset, caplog): acceptance=RegionNDMap.from_geom(geom=geom, data=1), acceptance_off=RegionNDMap.from_geom(geom=geom, data=5), ) + with pytest.warns(GammapyDeprecationWarning): + SensitivityEstimator( + gamma_min=25, bkg_syst_fraction=0.075, spectrum=PowerLawSpectralModel() + ) sens = SensitivityEstimator(gamma_min=25, bkg_syst_fraction=0.075) table = sens.run(dataset_on_off) @@ -89,7 +95,7 @@ def test_integral_estimation(spectrum_dataset, caplog): sens = SensitivityEstimator(gamma_min=25, bkg_syst_fraction=0.075) table = sens.run(dataset_on_off) flux_points = FluxPoints.from_table( - table, sed_type="e2dnde", reference_model=sens.spectrum + table, sed_type="e2dnde", reference_model=sens.spectral_model ) assert_allclose(table["excess"].data.squeeze(), 270540, rtol=1e-3) diff --git a/gammapy/estimators/utils.py b/gammapy/estimators/utils.py index 61b4761602..1f50fa863b 100644 --- a/gammapy/estimators/utils.py +++ b/gammapy/estimators/utils.py @@ -195,14 +195,13 @@ def find_peaks_in_flux_map(maps, threshold, min_distance=1): >>> # Find the peaks which are above 5 sigma >>> sources = find_peaks_in_flux_map(maps, threshold=5, min_distance=0.1*u.deg) >>> print(sources[:4]) - x y ra dec ... norm norm_err flux flux_err - deg deg ... 1 / (s cm2) 1 / (s cm2) - --- --- --------- --------- ... ------- -------- ----------- ----------- - 158 135 266.05019 -28.70181 ... 0.28551 0.06450 2.827e-12 6.385e-13 - 92 133 267.07022 -27.31834 ... 0.37058 0.08342 3.669e-12 8.259e-13 - 176 134 265.80492 -29.09805 ... 0.30561 0.06549 3.025e-12 6.484e-13 - 282 150 263.78083 -31.12704 ... 0.55027 0.12611 5.448e-12 1.249e-12 - + x y ra dec npred npred_excess counts ts sqrt_ts norm norm_err flux flux_err + deg deg 1 / (s cm2) 1 / (s cm2) + --- --- --------- --------- --------- ------------ --------- -------- ------- ------- -------- ----------- ----------- + 158 135 266.05019 -28.70181 192.00000 61.33788 192.00000 25.11839 5.01183 0.28551 0.06450 2.827e-12 6.385e-13 + 92 133 267.07022 -27.31834 137.00000 51.99467 137.00000 26.78181 5.17511 0.37058 0.08342 3.669e-12 8.259e-13 + 176 134 265.80492 -29.09805 195.00000 65.15990 195.00000 28.29158 5.31898 0.30561 0.06549 3.025e-12 6.484e-13 + 282 150 263.78083 -31.12704 84.00000 39.99004 84.00000 28.61526 5.34932 0.55027 0.12611 5.448e-12 1.249e-12 """ quantity_for_peaks = maps["sqrt_ts"] @@ -393,7 +392,8 @@ def resample_energy_edges(dataset, conditions={}): def compute_lightcurve_fvar(lightcurve, flux_quantity="flux"): - r"""Compute the fractional excess variance of the input lightcurve. + """ + Compute the fractional excess variance of the input lightcurve. Internally calls the `~gammapy.stats.compute_fvar` function. @@ -410,7 +410,6 @@ def compute_lightcurve_fvar(lightcurve, flux_quantity="flux"): fvar : `~astropy.table.Table` Table of fractional excess variance and associated error for each energy bin of the lightcurve. 
""" - flux = getattr(lightcurve, flux_quantity) flux_err = getattr(lightcurve, flux_quantity + "_err") @@ -431,7 +430,8 @@ def compute_lightcurve_fvar(lightcurve, flux_quantity="flux"): def compute_lightcurve_fpp(lightcurve, flux_quantity="flux"): - r"""Compute the point-to-point excess variance of the input lightcurve. + """ + Compute the point-to-point excess variance of the input lightcurve. Internally calls the `~gammapy.stats.compute_fpp` function @@ -447,7 +447,6 @@ def compute_lightcurve_fpp(lightcurve, flux_quantity="flux"): table : `~astropy.table.Table` Table of point-to-point excess variance and associated error for each energy bin of the lightcurve. """ - flux = getattr(lightcurve, flux_quantity) flux_err = getattr(lightcurve, flux_quantity + "_err") @@ -468,7 +467,8 @@ def compute_lightcurve_fpp(lightcurve, flux_quantity="flux"): def compute_lightcurve_doublingtime(lightcurve, flux_quantity="flux"): - r"""Compute the minimum characteristic flux doubling and halving time for the input lightcurve. + """ + Compute the minimum characteristic flux doubling and halving time for the input lightcurve. Internally calls the `~gammapy.stats.compute_flux_doubling` function. @@ -499,11 +499,10 @@ def compute_lightcurve_doublingtime(lightcurve, flux_quantity="flux"): References ---------- - ..[Brown2013] "Locating the γ-ray emission region - of the flat spectrum radio quasar PKS 1510−089", Brown et al. (2013) - https://academic.oup.com/mnras/article/431/1/824/1054498 + .. [Brown2013] "Locating the γ-ray emission region + of the flat spectrum radio quasar PKS 1510−089", Brown et al. (2013) + https://academic.oup.com/mnras/article/431/1/824/1054498 """ - flux = getattr(lightcurve, flux_quantity) flux_err = getattr(lightcurve, flux_quantity + "_err") coords = lightcurve.geom.axes["time"].center @@ -545,10 +544,11 @@ def compute_lightcurve_doublingtime(lightcurve, flux_quantity="flux"): def compute_lightcurve_discrete_correlation( lightcurve1, lightcurve2=None, flux_quantity="flux", tau=None ): - r"""Compute the discrete correlation function for two lightcurves, or the discrete autocorrelation if only one lightcurve is provided. + """Compute the discrete correlation function for two lightcurves, or the discrete autocorrelation if only one lightcurve is provided. + NaN values will be ignored in the computation in order to account for possible gaps in the data. - Internally calls the `~gammapy.stats.discrete_correlation` function + Internally calls the `~gammapy.stats.discrete_correlation` function. Parameters ---------- @@ -564,22 +564,21 @@ def compute_lightcurve_discrete_correlation( Size of the bins to compute the discrete correlation. If None, the bin size will be double the bins of the first lightcurve. Default is None. - Returns ------- discrete_correlation_dict : dict - Dictionary containing: - "bins" : the array of discrete time bins - "discrete_correlation" : discrete correlation function values - "discrete_correlation_err" : associated error + Dictionary containing the discrete correlation results. Entries are: + + * "bins" : the array of discrete time bins + * "discrete_correlation" : discrete correlation function values + * "discrete_correlation_err" : associated error References ---------- .. [Edelson1988] "THE DISCRETE CORRELATION FUNCTION: A NEW METHOD FOR ANALYZING - UNEVENLY SAMPLED VARIABILITY DATA", Edelson et al. (1988) - https://ui.adsabs.harvard.edu/abs/1988ApJ...333..646E/abstract + UNEVENLY SAMPLED VARIABILITY DATA", Edelson et al. 
(1988) + https://ui.adsabs.harvard.edu/abs/1988ApJ...333..646E/abstract """ - flux1 = getattr(lightcurve1, flux_quantity) flux_err1 = getattr(lightcurve1, flux_quantity + "_err") coords1 = lightcurve1.geom.axes["time"].center @@ -643,7 +642,6 @@ def get_edges_fixed_bins(fluxpoint, group_size, axis_name="energy"): edges_max : `~astropy.units.Quantity` or `~astropy.time.Time` Maximum bin edge for the new axis. """ - ax = fluxpoint.geom.axes[axis_name] nbin = ax.nbin if not isinstance(group_size, int): @@ -782,7 +780,8 @@ def get_rebinned_axis(fluxpoint, axis_name="energy", method=None, **kwargs): def get_combined_significance_maps(estimator, datasets): - """Computes excess and significance for a set of datasets. + """Compute excess and significance for a set of datasets. + The significance computation assumes that the model contains one degree of freedom per valid energy bin in each dataset. This method implemented here is valid under the assumption @@ -795,20 +794,20 @@ def get_combined_significance_maps(estimator, datasets): Parameters ---------- - estimator : `~gammapy.estimator.ExcessMapEstimator` or `~gammapy.estimator.TSMapEstimator` + estimator : `~gammapy.estimators.ExcessMapEstimator` or `~gammapy.estimators.TSMapEstimator` Excess Map Estimator or TS Map Estimator dataset : `~gammapy.datasets.Datasets` - Datasets containing only `~gammapy.maps.MapDataset`. + Datasets containing only `~gammapy.datasets.MapDataset`. Returns ------- results : dict - Dictionary with keys : - - "significance" : joint significance map. - - "df" : degree of freedom map (one norm per valid bin). - - "npred_excess" : summed excess map. - - "estimator_results" : dictionary containing the estimator results for each dataset. + Dictionary with entries: + * "significance" : joint significance map. + * "df" : degree of freedom map (one norm per valid bin). + * "npred_excess" : summed excess map. + * "estimator_results" : dictionary containing the estimator results for each dataset. """ from .map.excess import ExcessMapEstimator from .map.ts import TSMapEstimator @@ -850,6 +849,7 @@ def combine_flux_maps( maps, method="gaussian_errors", reference_model=None, dnde_scan_axis=None ): """Create a FluxMaps by combining a list of flux maps with the same geometry. + This assumes the flux maps are independent measurements of the same true value. The GTI is stacked in the process. @@ -887,7 +887,6 @@ def combine_flux_maps( flux_maps : `~gammapy.estimators.FluxMaps` Joint flux map. """ - gtis = [map_.gti for map_ in maps if map_.gti is not None] if np.any(gtis): gti = gtis[0].copy() @@ -959,7 +958,7 @@ def combine_flux_maps( if k == 0: stat_scan = map_stat_scan else: - stat_scan += map_stat_scan + stat_scan.data += map_stat_scan.data return get_flux_map_from_profile( {"stat_scan": stat_scan}, @@ -990,7 +989,7 @@ def _default_scan_map(flux_map, dnde_scan_axis=None): def interpolate_profile_map(flux_map, dnde_scan_axis=None): - """Interpolate sparse likelihood profile to regular grid + """Interpolate sparse likelihood profile to regular grid. Parameters ---------- @@ -1006,7 +1005,6 @@ def interpolate_profile_map(flux_map, dnde_scan_axis=None): Likelihood profile map. """ - stat_scan = _default_scan_map(flux_map, dnde_scan_axis) dnde_scan_axis = stat_scan.geom.axes["dnde"] @@ -1048,9 +1046,7 @@ def approximate_profile_map( ------- scan_map: `~gammapy.estimators.Maps` Likelihood profile map. 
- """ - stat_approx = _default_scan_map(flux_map, dnde_scan_axis) dnde_coord = stat_approx.geom.get_coord()["dnde"].value @@ -1132,13 +1128,13 @@ def get_flux_map_from_profile( reference_model : `~gammapy.modeling.models.SkyModel`, optional The reference model to use for conversions. If None, a model consisting of a point source with a power law spectrum of index 2 is assumed. - Default is None and and the one of `flux_map` will be used if available + Default is None and the one of `flux_map` will be used if available meta : dict, optional Dict of metadata. - Default is None and and the oneof `flux_map` will be used if available + Default is None and the one of `flux_map` will be used if available gti : `~gammapy.data.GTI`, optional Maps GTI information. - Default is None and and the one of `flux_map` will be used if available + Default is None and the one of `flux_map` will be used if available Returns ------- @@ -1146,7 +1142,6 @@ def get_flux_map_from_profile( Flux map. """ - if isinstance(flux_map, dict): output_maps = flux_map else: @@ -1224,13 +1219,13 @@ def _generate_scan_values(power_min=-6, power_max=2, relative_error=1e-2): def _get_default_norm( norm, - scan_min=None, - scan_max=None, - scan_n_values=None, + scan_min=0.2, + scan_max=5, + scan_n_values=11, scan_values=None, interp="lin", ): - """create default norm parameter""" + """Create default norm parameter.""" if norm is None or isinstance(norm, dict): norm_kwargs = dict( name="norm", @@ -1256,7 +1251,6 @@ def _get_default_norm( def _get_norm_scan_values(norm, result): """Compute norms based on the fit result to sample the stat profile at different scales.""" - norm_err = result["norm_err"] norm_value = result["norm"] if ~np.isfinite(norm_err) or norm_err == 0: diff --git a/gammapy/extern/xmltodict.py b/gammapy/extern/xmltodict.py index 180b815377..66a29b4600 100644 --- a/gammapy/extern/xmltodict.py +++ b/gammapy/extern/xmltodict.py @@ -173,7 +173,7 @@ def parse( Simple example:: - >>> import xmltodict + >>> from gammapy.extern import xmltodict >>> doc = xmltodict.parse(\"\"\" ... ... 1 @@ -181,9 +181,9 @@ def parse( ... ... \"\"\") >>> doc['a']['@prop'] - u'x' + 'x' >>> doc['a']['b'] - [u'1', u'2'] + ['1', '2'] If `item_depth` is `0`, the function returns a dictionary for the root element (default behavior). Otherwise, it calls `item_callback` every time @@ -198,7 +198,7 @@ def parse( Streaming example:: >>> def handle(path, item): - ... print 'path:%s item:%s' % (path, item) + ... print('path:%s item:%s' % (path, item)) ... return True ... >>> xmltodict.parse(\"\"\" @@ -206,8 +206,8 @@ def parse( ... 1 ... 2 ... \"\"\", item_depth=2, item_callback=handle) - path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:1 - path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:2 + path:[('a', OrderedDict([('prop', 'x')])), ('b', None)] item: 1 + path:[('a', OrderedDict([('prop', 'x')])), ('b', None)] item: 2 The optional argument `postprocessor` is a function that takes `path`, `key` and `value` as positional arguments and returns a new `(key, value)` @@ -220,7 +220,7 @@ def parse( ... return key, value >>> xmltodict.parse('12x', ... postprocessor=postprocessor) - OrderedDict([(u'a', OrderedDict([(u'b:int', [1, 2]), (u'b', u'x')]))]) + OrderedDict([('a', OrderedDict([('b:int', [1, 2]), ('b', 'x')]))]) You can pass an alternate version of `expat` (such as `defusedexpat`) by using the `expat` parameter. 
E.g: diff --git a/gammapy/irf/edisp/map.py b/gammapy/irf/edisp/map.py index 3115d5133e..53cb1571cd 100644 --- a/gammapy/irf/edisp/map.py +++ b/gammapy/irf/edisp/map.py @@ -35,7 +35,7 @@ class EDispMap(IRFMap): -------- :: - # Energy dispersion map for CTA data + # Energy dispersion map for CTAO data import numpy as np from astropy import units as u from astropy.coordinates import SkyCoord diff --git a/gammapy/irf/effective_area.py b/gammapy/irf/effective_area.py index 291ec9915d..851f2b16a0 100644 --- a/gammapy/irf/effective_area.py +++ b/gammapy/irf/effective_area.py @@ -1,4 +1,5 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst +import warnings import numpy as np import astropy.units as u from astropy.visualization import quantity_support @@ -7,6 +8,7 @@ from gammapy.maps.axes import UNIT_STRING_FORMAT from gammapy.visualization.utils import add_colorbar from .core import IRF +from gammapy.utils.deprecation import GammapyDeprecationWarning __all__ = ["EffectiveAreaTable2D"] @@ -239,7 +241,7 @@ def from_parametrization(cls, energy_axis_true=None, instrument="HESS"): energy_axis_true : `MapAxis`, optional Energy binning, analytic function is evaluated at log centers. Default is None. - instrument : {'HESS', 'HESS2', 'CTA'} + instrument : {'HESS', 'HESS2', 'CTAO'} Instrument name. Default is 'HESS'. Returns @@ -252,9 +254,15 @@ def from_parametrization(cls, energy_axis_true=None, instrument="HESS"): pars = { "HESS": [6.85e9, 0.0891, 5e5], "HESS2": [2.05e9, 0.0891, 1e5], - "CTA": [1.71e11, 0.0891, 1e5], + "CTAO": [1.71e11, 0.0891, 1e5], } + if instrument == "CTA": + instrument = "CTAO" + warnings.warn( + "Since v1.3, the value 'CTA' is replaced by 'CTAO' for the argument instrument.", + GammapyDeprecationWarning, + ) if instrument not in pars.keys(): ss = f"Unknown instrument: {instrument}\n" ss += f"Valid instruments: {list(pars.keys())}" diff --git a/gammapy/irf/psf/kernel.py b/gammapy/irf/psf/kernel.py index d13a8e3a49..b7d102d3e1 100644 --- a/gammapy/irf/psf/kernel.py +++ b/gammapy/irf/psf/kernel.py @@ -5,6 +5,7 @@ import matplotlib.pyplot as plt from gammapy.maps import Map from gammapy.modeling.models import PowerLawSpectralModel +from gammapy.utils.deprecation import deprecated_renamed_argument __all__ = ["PSFKernel"] @@ -161,12 +162,13 @@ def write(self, *args, **kwargs): """Write the Map object which contains the PSF kernel to file.""" self.psf_kernel_map.write(*args, **kwargs) - def to_image(self, spectrum=None, exposure=None, keepdims=True): + @deprecated_renamed_argument("spectrum", "spectral_model", "v1.3") + def to_image(self, spectral_model=None, exposure=None, keepdims=True): """Transform 3D PSFKernel into a 2D PSFKernel. Parameters ---------- - spectrum : `~gammapy.modeling.models.SpectralModel`, optional + spectral_model : `~gammapy.modeling.models.SpectralModel`, optional Spectral model to compute the weights. Default is power-law with spectral index of 2. 
exposure : `~astropy.units.Quantity` or `~numpy.ndarray`, optional @@ -184,8 +186,8 @@ def to_image(self, spectrum=None, exposure=None, keepdims=True): """ map = self.psf_kernel_map - if spectrum is None: - spectrum = PowerLawSpectralModel(index=2.0) + if spectral_model is None: + spectral_model = PowerLawSpectralModel(index=2.0) if exposure is None: exposure = np.ones(map.geom.axes[0].center.shape) @@ -200,7 +202,7 @@ def to_image(self, spectrum=None, exposure=None, keepdims=True): if "energy" in name: energy_name = name energy_edges = map.geom.axes[energy_name].edges - weights = spectrum.integral( + weights = spectral_model.integral( energy_min=energy_edges[:-1], energy_max=energy_edges[1:], intervals=True ) weights *= exposure diff --git a/gammapy/irf/psf/map.py b/gammapy/irf/psf/map.py index 88eaeec1f8..7aaa9c3496 100644 --- a/gammapy/irf/psf/map.py +++ b/gammapy/irf/psf/map.py @@ -7,12 +7,12 @@ from gammapy.maps import HpxGeom, Map, MapAxes, MapAxis, MapCoord, WcsGeom from gammapy.maps.axes import UNIT_STRING_FORMAT from gammapy.modeling.models import PowerLawSpectralModel -from gammapy.utils.deprecation import deprecated_renamed_argument from gammapy.utils.gauss import Gauss2DPDF from gammapy.utils.random import InverseCDFSampler, get_random_state from ..core import IRFMap from .core import PSF from .kernel import PSFKernel +from gammapy.utils.deprecation import deprecated_renamed_argument __all__ = ["PSFMap", "RecoPSFMap"] @@ -248,16 +248,12 @@ def containment_radius_map(self, energy_true, fraction=0.68): ) return Map.from_geom(geom=geom, data=data.value, unit=data.unit) - @deprecated_renamed_argument( - "factor", "precision_factor", "v1.2", arg_in_kwargs=True - ) def get_psf_kernel( self, geom, position=None, max_radius=None, containment=0.999, - factor=None, precision_factor=12, ): """Return a PSF kernel at the given position. @@ -278,9 +274,6 @@ def get_psf_kernel( Containment fraction to use as size of the kernel. The radius can be overwritten using the `max_radius` argument. Default is 0.999. - factor : int, optional - Oversampling factor to compute the PSF. - Default is None and it will be computed automatically. precision_factor : int, optional Factor between the bin half-width of the geom and the median R68% containment radius. Used only for the oversampling method. Default is 10. @@ -313,10 +306,7 @@ def get_psf_kernel( radii[radii > max_radius] = max_radius n_radii = len(radii) - if factor is None: # TODO: this remove and the else once factor is deprecated - factor = _psf_upsampling_factor(self, geom, position, precision_factor) - else: - factor = [factor] * n_radii + factor = _psf_upsampling_factor(self, geom, position, precision_factor) geom = geom.to_odd_npix(max_radius=max_radius) kernel_map = Map.from_geom(geom=geom) for im, ind in zip(kernel_map.iter_by_image(keepdims=True), range(n_radii)): @@ -458,12 +448,13 @@ def from_gauss(cls, energy_axis_true, rad_axis=None, sigma=0.1 * u.deg, geom=Non ) return cls(psf_map=psf_map, exposure_map=exposure_map) - def to_image(self, spectrum=None, keepdims=True): + @deprecated_renamed_argument("spectrum", "spectral_model", "v1.3") + def to_image(self, spectral_model=None, keepdims=True): """Reduce to a 2D map after weighing with the associated exposure and a spectrum. Parameters ---------- - spectrum : `~gammapy.modeling.models.SpectralModel`, optional + spectral_model : `~gammapy.modeling.models.SpectralModel`, optional Spectral model to compute the weights. Default is power-law with spectral index of 2. 
keepdims : bool, optional @@ -477,10 +468,10 @@ def to_image(self, spectrum=None, keepdims=True): """ from gammapy.makers.utils import _map_spectrum_weight - if spectrum is None: - spectrum = PowerLawSpectralModel(index=2.0) + if spectral_model is None: + spectral_model = PowerLawSpectralModel(index=2.0) - exp_weighed = _map_spectrum_weight(self.exposure_map, spectrum) + exp_weighed = _map_spectrum_weight(self.exposure_map, spectral_model) exposure = exp_weighed.sum_over_axes( axes_names=[self.energy_name], keepdims=keepdims ) diff --git a/gammapy/irf/psf/tests/test_kernel.py b/gammapy/irf/psf/tests/test_kernel.py index dcf452e11c..67f27effe4 100644 --- a/gammapy/irf/psf/tests/test_kernel.py +++ b/gammapy/irf/psf/tests/test_kernel.py @@ -5,8 +5,9 @@ import astropy.units as u from gammapy.irf import PSFKernel from gammapy.maps import MapAxis, WcsGeom -from gammapy.modeling.models import DiskSpatialModel +from gammapy.modeling.models import DiskSpatialModel, PowerLawSpectralModel from gammapy.utils.testing import mpl_plot_check +from gammapy.utils.deprecation import GammapyDeprecationWarning @pytest.fixture @@ -51,6 +52,9 @@ def test_psf_kernel_to_image(): kernel1 = PSFKernel.from_spatial_model(disk_1, geom, max_radius=rad_max, factor=4) kernel2 = PSFKernel.from_spatial_model(disk_2, geom, max_radius=rad_max, factor=4) + with pytest.warns(GammapyDeprecationWarning): + kernel1.to_image(spectrum=PowerLawSpectralModel()) + kernel1.psf_kernel_map.data[1, :, :] = kernel2.psf_kernel_map.data[1, :, :] kernel_image_1 = kernel1.to_image() diff --git a/gammapy/irf/psf/tests/test_map.py b/gammapy/irf/psf/tests/test_map.py index d2ef82787f..5b2f30c987 100644 --- a/gammapy/irf/psf/tests/test_map.py +++ b/gammapy/irf/psf/tests/test_map.py @@ -10,6 +10,8 @@ from gammapy.makers.utils import make_map_exposure_true_energy, make_psf_map from gammapy.maps import Map, MapAxis, MapCoord, RegionGeom, WcsGeom from gammapy.utils.testing import mpl_plot_check, requires_data +from gammapy.modeling.models import PowerLawSpectralModel +from gammapy.utils.deprecation import GammapyDeprecationWarning @pytest.fixture(scope="session") @@ -423,6 +425,9 @@ def test_psf_map_write_gtpsf(tmpdir): def test_to_image(): psfmap = make_test_psfmap(0.15 * u.deg) + with pytest.warns(GammapyDeprecationWarning): + psfmap.to_image(spectrum=PowerLawSpectralModel()) + psf2D = psfmap.to_image() assert_allclose(psf2D.psf_map.geom.data_shape, (1, 100, 25, 25)) assert_allclose(psf2D.exposure_map.geom.data_shape, (1, 1, 25, 25)) diff --git a/gammapy/irf/tests/test_effective_area.py b/gammapy/irf/tests/test_effective_area.py index f986c995f0..90bab7df01 100644 --- a/gammapy/irf/tests/test_effective_area.py +++ b/gammapy/irf/tests/test_effective_area.py @@ -10,6 +10,7 @@ mpl_plot_check, requires_data, ) +from gammapy.utils.deprecation import GammapyDeprecationWarning @pytest.fixture(scope="session") @@ -54,6 +55,13 @@ def test_from_parametrization(): assert area.unit == area_ref.unit assert area.meta["TELESCOP"] == "HESS" + with pytest.warns(GammapyDeprecationWarning): + area2 = EffectiveAreaTable2D.from_parametrization(axis, "CTA") + assert area2.unit == area_ref.unit + + with pytest.raises(ValueError): + area2 = EffectiveAreaTable2D.from_parametrization(axis, "SWIFT") + @requires_data() def test_plot(aeff): diff --git a/gammapy/makers/map.py b/gammapy/makers/map.py index 13257f9856..d8d39cb99c 100644 --- a/gammapy/makers/map.py +++ b/gammapy/makers/map.py @@ -34,7 +34,7 @@ class MapDatasetMaker(Maker): Background evaluation oversampling factor 
in energy. background_interp_missing_data : bool, optional Interpolate missing values in background 3d map. - Default is True, have to be set to True for CTA IRF. + Default is True, have to be set to True for CTAO IRF. background_pad_offset : bool, optional Pad one bin in offset for 2d background map. This avoids extrapolation at edges and use the nearest value. diff --git a/gammapy/modeling/fit.py b/gammapy/modeling/fit.py index 083087ebff..2804f8dfb1 100644 --- a/gammapy/modeling/fit.py +++ b/gammapy/modeling/fit.py @@ -308,7 +308,7 @@ def covariance(self, datasets, optimize_result=None): method=method, success=info["success"], message=info["message"], - matrix=optimize_result.models.covariance.data, + matrix=matrix.data, ) def confidence(self, datasets, parameter, sigma=1, reoptimize=True): diff --git a/gammapy/modeling/models/core.py b/gammapy/modeling/models/core.py index 821abf1be3..39205c5f17 100644 --- a/gammapy/modeling/models/core.py +++ b/gammapy/modeling/models/core.py @@ -241,7 +241,6 @@ def to_dict(self, full_output=False): "error", "interp", "scale_method", - "is_norm", ]: default = init[item] diff --git a/gammapy/modeling/models/cube.py b/gammapy/modeling/models/cube.py index 5de9e267ca..eeb82d8e0b 100644 --- a/gammapy/modeling/models/cube.py +++ b/gammapy/modeling/models/cube.py @@ -2,6 +2,7 @@ """Cube models (axes: lon, lat, energy).""" import logging +import warnings import os import numpy as np import astropy.units as u @@ -14,6 +15,7 @@ from gammapy.utils.compat import COPY_IF_NEEDED from gammapy.utils.fits import LazyFitsData from gammapy.utils.scripts import make_name, make_path +from gammapy.utils.deprecation import GammapyDeprecationWarning from .core import Model, ModelBase, Models from .spatial import ConstantSpatialModel, SpatialModel from .spectral import PowerLawNormSpectralModel, SpectralModel, TemplateSpectralModel @@ -628,12 +630,12 @@ class FoVBackgroundModel(ModelBase): Parameters ---------- - spectral_model : `~gammapy.modeling.models.SpectralModel` - Normalized spectral model. - Default is `~gammapy.modeling.models.PowerLawNormSpectralModel` dataset_name : str Dataset name. - spatial_model : `~gammapy.modeling.models.SpatialModel` + spectral_model : `~gammapy.modeling.models.SpectralModel`, Optional + Normalized spectral model. + Default is `~gammapy.modeling.models.PowerLawNormSpectralModel` + spatial_model : `~gammapy.modeling.models.SpatialModel`, Optional Unitless Spatial model (unit is dropped on evaluation if defined). Default is None. 
""" @@ -642,13 +644,21 @@ class FoVBackgroundModel(ModelBase): def __init__( self, + dataset_name, spectral_model=None, - dataset_name=None, spatial_model=None, covariance_data=None, ): - if dataset_name is None: - raise ValueError("Dataset name a is required argument") + # TODO: remove this in v2.0 + if isinstance(dataset_name, SpectralModel): + warnings.warn( + "dataset_name has been made first argument since v1.3.", + GammapyDeprecationWarning, + stacklevel=2, + ) + buf = dataset_name + dataset_name = spectral_model + spectral_model = buf self.datasets_names = [dataset_name] diff --git a/gammapy/modeling/models/spectral.py b/gammapy/modeling/models/spectral.py index 73fedf0911..ac0942644b 100644 --- a/gammapy/modeling/models/spectral.py +++ b/gammapy/modeling/models/spectral.py @@ -696,7 +696,6 @@ class ConstantSpectralModel(SpectralModel): tag = ["ConstantSpectralModel", "const"] const = Parameter("const", "1e-12 cm-2 s-1 TeV-1") - const._is_norm = True @staticmethod def evaluate(energy, const): @@ -811,7 +810,6 @@ class PowerLawSpectralModel(SpectralModel): scale_method="scale10", interp="log", ) - amplitude._is_norm = True reference = Parameter("reference", "1 TeV", frozen=True) @staticmethod @@ -936,7 +934,6 @@ class PowerLawNormSpectralModel(SpectralModel): tag = ["PowerLawNormSpectralModel", "pl-norm"] norm = Parameter("norm", 1, unit="", interp="log") - norm._is_norm = True tilt = Parameter("tilt", 0, frozen=True) reference = Parameter("reference", "1 TeV", frozen=True) @@ -1049,7 +1046,6 @@ class PowerLaw2SpectralModel(SpectralModel): scale_method="scale10", interp="log", ) - amplitude._is_norm = True index = Parameter("index", 2) emin = Parameter("emin", "0.1 TeV", frozen=True) emax = Parameter("emax", "100 TeV", frozen=True) @@ -1141,7 +1137,6 @@ class BrokenPowerLawSpectralModel(SpectralModel): scale_method="scale10", interp="log", ) - amplitude._is_norm = True ebreak = Parameter("ebreak", "1 TeV") @staticmethod @@ -1189,7 +1184,6 @@ class SmoothBrokenPowerLawSpectralModel(SpectralModel): scale_method="scale10", interp="log", ) - amplitude._is_norm = True ebreak = Parameter("ebreak", "1 TeV") reference = Parameter("reference", "1 TeV", frozen=True) beta = Parameter("beta", 1, frozen=True) @@ -1327,7 +1321,6 @@ class ExpCutoffPowerLawSpectralModel(SpectralModel): scale_method="scale10", interp="log", ) - amplitude._is_norm = True reference = Parameter("reference", "1 TeV", frozen=True) lambda_ = Parameter("lambda_", "0.1 TeV-1") alpha = Parameter("alpha", "1.0", frozen=True) @@ -1367,7 +1360,7 @@ class ExpCutoffPowerLawNormSpectralModel(SpectralModel): ---------- index : `~astropy.units.Quantity` :math:`\Gamma`. - Default is 1.5. + Default is 0. norm : `~astropy.units.Quantity` :math:`\phi_0`. Default is 1. 
@@ -1388,9 +1381,8 @@ class ExpCutoffPowerLawNormSpectralModel(SpectralModel): tag = ["ExpCutoffPowerLawNormSpectralModel", "ecpl-norm"] - index = Parameter("index", 1.5) + index = Parameter("index", 0.0) norm = Parameter("norm", 1, unit="", interp="log") - norm._is_norm = True reference = Parameter("reference", "1 TeV", frozen=True) lambda_ = Parameter("lambda_", "0.1 TeV-1") alpha = Parameter("alpha", "1.0", frozen=True) @@ -1400,7 +1392,7 @@ def __init__( ): if index is None: warnings.warn( - "The default index value changed from 1.5 to 0 since v1.2", + "The default index value changed from 1.5 to 0 since v1.3", GammapyDeprecationWarning, ) @@ -1455,7 +1447,6 @@ class ExpCutoffPowerLaw3FGLSpectralModel(SpectralModel): scale_method="scale10", interp="log", ) - amplitude._is_norm = True reference = Parameter("reference", "1 TeV", frozen=True) ecut = Parameter("ecut", "10 TeV") @@ -1504,7 +1495,6 @@ class SuperExpCutoffPowerLaw3FGLSpectralModel(SpectralModel): scale_method="scale10", interp="log", ) - amplitude._is_norm = True reference = Parameter("reference", "1 TeV", frozen=True) ecut = Parameter("ecut", "10 TeV") index_1 = Parameter("index_1", 1.5) @@ -1547,7 +1537,6 @@ class SuperExpCutoffPowerLaw4FGLSpectralModel(SpectralModel): scale_method="scale10", interp="log", ) - amplitude._is_norm = True reference = Parameter("reference", "1 TeV", frozen=True) expfactor = Parameter("expfactor", "1e-2") index_1 = Parameter("index_1", 1.5) @@ -1595,7 +1584,6 @@ class SuperExpCutoffPowerLaw4FGLDR3SpectralModel(SpectralModel): scale_method="scale10", interp="log", ) - amplitude._is_norm = True reference = Parameter("reference", "1 TeV", frozen=True) expfactor = Parameter("expfactor", "1e-2") index_1 = Parameter("index_1", 1.5) @@ -1647,7 +1635,6 @@ class LogParabolaSpectralModel(SpectralModel): scale_method="scale10", interp="log", ) - amplitude._is_norm = True reference = Parameter("reference", "10 TeV", frozen=True) alpha = Parameter("alpha", 2) beta = Parameter("beta", 1) @@ -1702,24 +1689,11 @@ class LogParabolaNormSpectralModel(SpectralModel): tag = ["LogParabolaNormSpectralModel", "lp-norm"] norm = Parameter("norm", 1, unit="", interp="log") - norm._is_norm = True reference = Parameter("reference", "10 TeV", frozen=True) alpha = Parameter("alpha", 0) beta = Parameter("beta", 0) def __init__(self, norm=None, reference=None, alpha=None, beta=None, **kwargs): - if alpha is None: - warnings.warn( - "The default alpha value changed from 2 to 0 since v1.2", - GammapyDeprecationWarning, - ) - - if beta is None: - warnings.warn( - "The default beta value changed from 1 to 0 since v1.2", - GammapyDeprecationWarning, - ) - if norm is not None: kwargs.update({"norm": norm}) if beta is not None: @@ -2402,7 +2376,6 @@ class GaussianSpectralModel(SpectralModel): tag = ["GaussianSpectralModel", "gauss"] amplitude = Parameter("amplitude", 1e-12 * u.Unit("cm-2 s-1"), interp="log") - amplitude._is_norm = True mean = Parameter("mean", 1 * u.TeV) sigma = Parameter("sigma", 2 * u.TeV) diff --git a/gammapy/modeling/models/spectral_cosmic_ray.py b/gammapy/modeling/models/spectral_cosmic_ray.py index 364f14d35f..65573fc2b0 100644 --- a/gammapy/modeling/models/spectral_cosmic_ray.py +++ b/gammapy/modeling/models/spectral_cosmic_ray.py @@ -2,7 +2,7 @@ """Simple models for cosmic ray spectra at Earth. 
For measurements, the "Database of Charged Cosmic Rays (CRDB)" is a great resource: -http://lpsc.in2p3.fr/cosmic-rays-db/ +https://lpsc.in2p3.fr/crdb/ """ import numpy as np from astropy import units as u @@ -37,7 +37,7 @@ def evaluate(energy, L, Ep, w): def create_cosmic_ray_spectral_model(particle="proton"): """Cosmic a cosmic ray spectral model at Earth. - These are the spectra assumed in this CTA study: + These are the spectra assumed in this CTAO study: Table 3 in https://ui.adsabs.harvard.edu/abs/2013APh....43..171B The spectrum given is a differential flux ``dnde`` in units of diff --git a/gammapy/modeling/models/spectral_crab.py b/gammapy/modeling/models/spectral_crab.py index de2520451b..c2e936af72 100644 --- a/gammapy/modeling/models/spectral_crab.py +++ b/gammapy/modeling/models/spectral_crab.py @@ -23,7 +23,6 @@ class MeyerCrabSpectralModel(SpectralModel): """ norm = Parameter("norm", value=1, frozen=True) - norm._is_norm = True coefficients = [-0.00449161, 0, 0.0473174, -0.179475, -0.53616, -10.2708] @staticmethod diff --git a/gammapy/modeling/models/tests/test_io.py b/gammapy/modeling/models/tests/test_io.py index c0eba87f66..b23d6e9109 100644 --- a/gammapy/modeling/models/tests/test_io.py +++ b/gammapy/modeling/models/tests/test_io.py @@ -360,8 +360,6 @@ def make_all_models(): yield Model.create("SuperExpCutoffPowerLaw4FGLDR3SpectralModel", "spectral") yield Model.create("SuperExpCutoffPowerLaw4FGLSpectralModel", "spectral") yield Model.create("LogParabolaSpectralModel", "spectral") - with pytest.warns(GammapyDeprecationWarning): - yield Model.create("LogParabolaNormSpectralModel", "spectral") yield Model.create( "TemplateSpectralModel", "spectral", energy=[1, 2] * u.cm, values=[3, 4] * u.cm ) # TODO: add unit validation? diff --git a/gammapy/modeling/models/tests/test_spectral.py b/gammapy/modeling/models/tests/test_spectral.py index 59aece551a..89526d2126 100644 --- a/gammapy/modeling/models/tests/test_spectral.py +++ b/gammapy/modeling/models/tests/test_spectral.py @@ -1291,10 +1291,3 @@ def test_template_ND_EBL(tmpdir): assert_allclose(template_new.map.data, region_map.data) assert len(template.parameters) == 1 assert_allclose(template.parameters["redshift"].value, 0.1) - - -def test_is_norm_spectral_models(): - for test_model in TEST_MODELS: - m = test_model["model"] - if m.tag[0] not in ["PiecewiseNormSpectralModel", "TemplateSpectralModel"]: - assert np.any([p._is_norm for p in m.parameters]) diff --git a/gammapy/modeling/parameter.py b/gammapy/modeling/parameter.py index 6a23fe415b..189c99dd1c 100644 --- a/gammapy/modeling/parameter.py +++ b/gammapy/modeling/parameter.py @@ -1,5 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst """Model parameter classes.""" + import collections.abc import copy import html @@ -8,7 +9,6 @@ import numpy as np from astropy import units as u from astropy.table import Table -from gammapy.utils.deprecation import deprecated_attribute from gammapy.utils.interpolation import interpolation_scale __all__ = ["Parameter", "Parameters", "PriorParameter", "PriorParameters"] @@ -93,16 +93,10 @@ class Parameter: Method used to set ``factor`` and ``scale``. interp : {"lin", "sqrt", "log"} Parameter scaling to use for the scan. - is_norm : bool - Whether the parameter represents the flux norm of the model. prior : `~gammapy.modeling.models.Prior` Prior set on the parameter. 
""" - norm_parameters = deprecated_attribute("norm_parameters", "1.2") - - is_norm = deprecated_attribute("is_norm", "1.2") - def __init__( self, name, @@ -120,7 +114,6 @@ def __init__( scan_values=None, scale_method="scale10", interp="lin", - is_norm=False, prior=None, ): if not isinstance(name, str): @@ -133,7 +126,6 @@ def __init__( self.max = max self.frozen = frozen self._error = error - self._is_norm = is_norm self._type = None # TODO: move this to a setter method that can be called from `__set__` also! @@ -459,7 +451,6 @@ def to_dict(self): "frozen": self.frozen, "interp": self.interp, "scale_method": self.scale_method, - "is_norm": self._is_norm, } if self._link_label_io is not None: @@ -601,11 +592,6 @@ def copy(self): """Deep copy.""" return copy.deepcopy(self) - @property - def norm_parameters(self): - """List of norm parameters.""" - return self.__class__([par for par in self._parameters if par._is_norm]) - @property def free_parameters(self): """List of free parameters.""" @@ -675,7 +661,6 @@ def _create_default_table(): "min": "float", "max": "float", "frozen": "bool", - "is_norm": "bool", "link": "str", "prior": "str", } diff --git a/gammapy/modeling/sherpa.py b/gammapy/modeling/sherpa.py index edeac63993..cdaf0fed99 100644 --- a/gammapy/modeling/sherpa.py +++ b/gammapy/modeling/sherpa.py @@ -51,8 +51,12 @@ def optimize_sherpa(parameters, function, store_trace=False, **kwargs): optimizer.config.update(kwargs) pars = [par.factor for par in parameters.free_parameters] - parmins = [par.factor_min for par in parameters.free_parameters] - parmaxes = [par.factor_max for par in parameters.free_parameters] + parmins = np.nan_to_num( + [par.factor_min for par in parameters.free_parameters], nan=-np.inf + ) + parmaxes = np.nan_to_num( + [par.factor_max for par in parameters.free_parameters], nan=np.inf + ) statfunc = SherpaLikelihood(function, parameters, store_trace) diff --git a/gammapy/modeling/tests/test_fit.py b/gammapy/modeling/tests/test_fit.py index 56a28d5345..de89acde38 100644 --- a/gammapy/modeling/tests/test_fit.py +++ b/gammapy/modeling/tests/test_fit.py @@ -1,5 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst """Unit tests for the Fit class""" + import pytest from numpy.testing import assert_allclose from astropy.table import Table @@ -364,3 +365,18 @@ def test_write(tmpdir): assert "CovarianceResult" not in data assert "OptimizeResult" in data + + +@requires_data() +def test_covariance_no_optimize_results(): + spec = SpectrumDatasetOnOff.read( + "$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs23523.fits" + ) + spec.models = [SkyModel.create(spectral_model="pl")] + + fit = Fit() + fit.optimize([spec]) + res = fit.covariance([spec]) + + assert_allclose(res.matrix.data[0, 1], 6.163970e-13, rtol=1e-3) + assert_allclose(res.matrix.data[0, 0], 2.239832e-02, rtol=1e-3) diff --git a/gammapy/modeling/tests/test_parameter.py b/gammapy/modeling/tests/test_parameter.py index f0b4db9cc7..dd5ee6ad27 100644 --- a/gammapy/modeling/tests/test_parameter.py +++ b/gammapy/modeling/tests/test_parameter.py @@ -168,7 +168,7 @@ def test_parameters_to_table(pars, tmp_path): table = pars.to_table() assert len(table) == 2 - assert len(table.columns) == 11 + assert len(table.columns) == 10 assert table["link"][0] == "test" assert table["link"][1] == "" @@ -184,7 +184,7 @@ def test_parameters_create_table(): table = Parameters._create_default_table() assert len(table) == 0 - assert len(table.columns) == 11 + assert len(table.columns) == 10 assert table.colnames == [ "type", @@ 
diff --git a/gammapy/modeling/tests/test_parameter.py b/gammapy/modeling/tests/test_parameter.py
index f0b4db9cc7..dd5ee6ad27 100644
--- a/gammapy/modeling/tests/test_parameter.py
+++ b/gammapy/modeling/tests/test_parameter.py
@@ -168,7 +168,7 @@ def test_parameters_to_table(pars, tmp_path):
     table = pars.to_table()
     assert len(table) == 2
-    assert len(table.columns) == 11
+    assert len(table.columns) == 10
     assert table["link"][0] == "test"
     assert table["link"][1] == ""
@@ -184,7 +184,7 @@ def test_parameters_create_table():
     table = Parameters._create_default_table()
     assert len(table) == 0
-    assert len(table.columns) == 11
+    assert len(table.columns) == 10
 
     assert table.colnames == [
         "type",
@@ -195,7 +195,6 @@ def test_parameters_create_table():
         "min",
         "max",
         "frozen",
-        "is_norm",
         "link",
         "prior",
     ]
@@ -209,7 +208,6 @@ def test_parameters_create_table():
        ("min", "
=5.0,<5.3
 numpy<2.0
+sherpa<4.17
 sphinx-astropy
 sphinx
 sphinx-click
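
Finally, a quick check of the serialization change the two test updates above pin down — a sketch assuming a gammapy build with this patch applied:

```python
from gammapy.modeling import Parameter, Parameters

# With "is_norm" removed from the serialized columns, the parameter
# table drops from 11 to 10 columns.
pars = Parameters([Parameter("norm", value=1.0)])
table = pars.to_table()

assert "is_norm" not in table.colnames
assert len(table.columns) == 10
```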