diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml
new file mode 100644
index 00000000..851150d5
--- /dev/null
+++ b/.github/workflows/documentation.yml
@@ -0,0 +1,28 @@
+name: documentation
+
+on: [push, pull_request, workflow_dispatch]
+
+permissions:
+ contents: write
+
+jobs:
+ docs:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install .[doc]
+ - name: Sphinx build
+ run: |
+ sphinx-build doc _build
+ - name: Deploy to GitHub Pages
+ uses: peaceiris/actions-gh-pages@v3
+ if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
+ with:
+ publish_branch: gh-pages
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: _build/
+ force_orphan: true
\ No newline at end of file
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index a7250626..e6f62781 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -74,15 +74,17 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- pip install ".[docs]"
+ pip install ".[doc]"
- name: Build site
- run: mkdocs build
+ run: |
+ cd doc
+ make html
- name: Check html
uses: chabad360/htmlproofer@master
with:
- directory: "./site"
+ directory: "./doc/_build/html"
# The directory to scan
- arguments: --checks Links,Scripts --ignore-urls "https://fonts.gstatic.com,https://mkdocs-gallery.github.io" --assume-extension --check-external-hash --ignore-status-codes 403
+ arguments: --checks Links,Scripts --ignore-urls "https://fonts.gstatic.com,https://mkdocs-gallery.github.io,./doc/_build/html/_static/" --assume-extension --check-external-hash --ignore-status-codes 403 --ignore-files "/.+\/html\/_static\/.+/"
# The arguments to pass to HTMLProofer
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 00000000..d4bb2cbb
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = .
+BUILDDIR = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/doc/README.md b/doc/README.md
new file mode 100644
index 00000000..fcb4eeca
--- /dev/null
+++ b/doc/README.md
@@ -0,0 +1,11 @@
+Building the pynapple documentation
+===========
+
+`pip install pynapple[doc]`
+
+`pip install sphinx-autobuild`
+
+In doc:
+
+`sphinx-autobuild . _build/html`
+
diff --git a/doc/_static/CCN-logo-wText.png b/doc/_static/CCN-logo-wText.png
new file mode 100644
index 00000000..206f7693
Binary files /dev/null and b/doc/_static/CCN-logo-wText.png differ
diff --git a/doc/_static/Icon/Pynapple_final_icon.png b/doc/_static/Icon/Pynapple_final_icon.png
new file mode 100644
index 00000000..1ba520bb
Binary files /dev/null and b/doc/_static/Icon/Pynapple_final_icon.png differ
diff --git a/doc/_static/Logo/Pynapple_final_logo.png b/doc/_static/Logo/Pynapple_final_logo.png
new file mode 100644
index 00000000..b61fd05a
Binary files /dev/null and b/doc/_static/Logo/Pynapple_final_logo.png differ
diff --git a/doc/_static/custom.css b/doc/_static/custom.css
new file mode 100644
index 00000000..6b5d41e0
--- /dev/null
+++ b/doc/_static/custom.css
@@ -0,0 +1,21 @@
+
+.bd-main .bd-content .bd-article-container{
+ max-width:100%;
+ flex-grow: 1;
+}
+
+html[data-theme=light]{
+ --pst-color-primary: rgb(52, 54, 99);
+ --pst-color-secondary: rgb(107, 161, 174);
+ --pst-color-link: rgb(74, 105, 145);
+ --pst-color-inline-code: rgb(96, 141, 130);
+}
+
+:root {
+ --pst-font-size-h1: 38px;
+ --pst-font-size-h2: 32px;
+ --pst-font-size-h3: 27px;
+ --pst-font-size-h4: 22px;
+ --pst-font-size-h5: 18px;
+ --pst-font-size-h6: 15px;
+}
\ No newline at end of file
diff --git a/doc/_static/example_thumbs/Pynapple_final_icon.png b/doc/_static/example_thumbs/Pynapple_final_icon.png
new file mode 100644
index 00000000..382dfb14
Binary files /dev/null and b/doc/_static/example_thumbs/Pynapple_final_icon.png differ
diff --git a/doc/_static/example_thumbs/correlation.svg b/doc/_static/example_thumbs/correlation.svg
new file mode 100644
index 00000000..5c1a7da5
--- /dev/null
+++ b/doc/_static/example_thumbs/correlation.svg
@@ -0,0 +1,22 @@
+
+
+
diff --git a/doc/_static/example_thumbs/decoding.svg b/doc/_static/example_thumbs/decoding.svg
new file mode 100644
index 00000000..3f4248b2
--- /dev/null
+++ b/doc/_static/example_thumbs/decoding.svg
@@ -0,0 +1,22 @@
+
+
+
diff --git a/doc/_static/example_thumbs/filtering.svg b/doc/_static/example_thumbs/filtering.svg
new file mode 100644
index 00000000..3a1bfc60
--- /dev/null
+++ b/doc/_static/example_thumbs/filtering.svg
@@ -0,0 +1,8 @@
+
+
+
diff --git a/doc/_static/example_thumbs/perievent.svg b/doc/_static/example_thumbs/perievent.svg
new file mode 100644
index 00000000..029ad82c
--- /dev/null
+++ b/doc/_static/example_thumbs/perievent.svg
@@ -0,0 +1,1008 @@
+
+
+
diff --git a/doc/_static/example_thumbs/timeseries.svg b/doc/_static/example_thumbs/timeseries.svg
new file mode 100644
index 00000000..2d2239b1
--- /dev/null
+++ b/doc/_static/example_thumbs/timeseries.svg
@@ -0,0 +1,142 @@
+
+
+
diff --git a/doc/_static/example_thumbs/tuningcurves.svg b/doc/_static/example_thumbs/tuningcurves.svg
new file mode 100644
index 00000000..36618e66
--- /dev/null
+++ b/doc/_static/example_thumbs/tuningcurves.svg
@@ -0,0 +1,17 @@
+
+
+
diff --git a/doc/_static/example_thumbs/wavelets.svg b/doc/_static/example_thumbs/wavelets.svg
new file mode 100644
index 00000000..f95b2196
--- /dev/null
+++ b/doc/_static/example_thumbs/wavelets.svg
@@ -0,0 +1,16 @@
+
+
+
diff --git a/doc/_templates/autosummary/class.rst b/doc/_templates/autosummary/class.rst
new file mode 100644
index 00000000..20bacc3e
--- /dev/null
+++ b/doc/_templates/autosummary/class.rst
@@ -0,0 +1,32 @@
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+.. autoclass:: {{ objname }}
+ :members:
+ :inherited-members:
+
+ {% block methods %}
+ .. automethod:: __init__
+
+ {% if methods %}
+ .. rubric:: Methods
+
+ .. autosummary::
+ :toctree: ./
+ {% for item in methods %}
+ ~{{ name }}.{{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
+
+ {% block attributes %}
+ {% if attributes %}
+ .. rubric:: Attributes
+
+ .. autosummary::
+ {% for item in attributes %}
+ ~{{ name }}.{{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
\ No newline at end of file
diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html
new file mode 100644
index 00000000..a1853906
--- /dev/null
+++ b/doc/_templates/layout.html
@@ -0,0 +1,16 @@
+{% extends "!layout.html" %}
+
+{%- block footer %}
+
+{%- endblock %}
\ No newline at end of file
diff --git a/doc/_templates/version.html b/doc/_templates/version.html
new file mode 100644
index 00000000..2ffcad28
--- /dev/null
+++ b/doc/_templates/version.html
@@ -0,0 +1,3 @@
+
+{{ version }}
+
\ No newline at end of file
diff --git a/doc/api.rst b/doc/api.rst
new file mode 100644
index 00000000..c08fd444
--- /dev/null
+++ b/doc/api.rst
@@ -0,0 +1,115 @@
+.. _api_ref:
+
+API reference
+=============
+
+Core objects
+------------
+
+.. rubric:: Time Series
+
+.. currentmodule:: pynapple.core.time_series
+
+.. autosummary::
+ :toctree: generated/
+ :nosignatures:
+ :recursive:
+
+ Tsd
+ TsdFrame
+ TsdTensor
+
+
+.. rubric:: Intervals
+
+.. currentmodule:: pynapple.core.interval_set
+
+.. autosummary::
+ :toctree: generated/
+ :nosignatures:
+ :recursive:
+
+ IntervalSet
+
+
+.. rubric:: Timestamps
+
+.. currentmodule:: pynapple.core.time_series
+
+.. autosummary::
+ :toctree: generated/
+ :nosignatures:
+ :recursive:
+
+ Ts
+
+.. rubric:: Group of timestamps
+
+.. currentmodule:: pynapple.core.ts_group
+
+.. autosummary::
+ :toctree: generated/
+ :nosignatures:
+ :recursive:
+
+ TsGroup
+
+
+Input-Output
+-----------
+
+.. currentmodule:: pynapple.io.interface_nwb
+
+.. rubric:: Neurodata Without Borders (NWB)
+
+.. autosummary::
+ :toctree: generated/
+ :nosignatures:
+ :recursive:
+
+ NWBFile
+
+
+.. currentmodule:: pynapple.io.interface_npz
+
+.. rubric:: Numpy files
+
+.. autosummary::
+ :toctree: generated/
+ :nosignatures:
+ :recursive:
+
+ NPZFile
+
+
+.. currentmodule:: pynapple.io
+
+.. rubric:: Miscellaneous
+
+.. autosummary::
+ :toctree: generated/
+ :nosignatures:
+
+ misc
+ folder.Folder
+
+
+Analysis modules
+----------------
+
+.. currentmodule:: pynapple.process
+
+.. autosummary::
+ :toctree: generated/
+ :nosignatures:
+
+ correlograms
+ decoding
+ filtering
+ perievent
+ randomize
+ spectrum
+ tuning_curves
+ wavelets
+
+
diff --git a/doc/citing.md b/doc/citing.md
new file mode 100644
index 00000000..31a091ca
--- /dev/null
+++ b/doc/citing.md
@@ -0,0 +1,47 @@
+# Citing pynapple
+
+If you are using pynapple for your analysis, please cite it. A paper describing pynapple has been published in [eLife](https://elifesciences.org/reviewed-preprints/85786) :
+
+```{eval-rst}
+.. Important::
+ Viejo, Guillaume, Daniel Levenstein, Sofia Skromne Carrasco, Dhruv Mehrotra, Sara Mahallati, Gilberto R. Vite, Henry Denny, Lucas Sjulson, Francesco P. Battaglia, and Adrien Peyrache. "Pynapple, a toolbox for data analysis in neuroscience." Elife 12 (2023): RP85786.
+```
+
+Here is a ready-made BibTex entry:
+
+```
+@article{viejo2023pynapple,
+ title={Pynapple, a toolbox for data analysis in neuroscience},
+ author={Viejo, Guillaume and Levenstein, Daniel and Carrasco, Sofia Skromne and Mehrotra, Dhruv and Mahallati, Sara and Vite, Gilberto R and Denny, Henry and Sjulson, Lucas and Battaglia, Francesco P and Peyrache, Adrien},
+ journal={Elife},
+ volume={12},
+ pages={RP85786},
+ year={2023},
+ publisher={eLife Sciences Publications Limited}
+}
+```
+
+## Development Lead
+
+
+- Guillaume Viejo
+- Edoardo Balzani
+
+
+## Contributors
+
+```{eval-rst}
+.. contributors:: pynapple-org/pynapple
+ :contributions:
+ :avatars:
+ :limit: 99
+ :order: DESC
+```
+
+## Special Credits
+
+Special thanks to Francesco P. Battaglia
+(https://github.com/fpbattaglia) for the development of the original
+*TSToolbox* (https://github.com/PeyracheLab/TStoolbox) and
+*neuroseries* (https://github.com/NeuroNetMem/neuroseries) packages,
+the latter constituting the core of *pynapple*.
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 00000000..8592308c
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,152 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import sys
+import time
+import pynapple
+from pathlib import Path
+
+sys.path.insert(0, os.path.abspath('..'))
+sys.path.insert(0, os.path.abspath('sphinxext'))
+
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+project = 'pynapple'
+copyright = f'2021-{time.strftime("%Y")}'
+author = 'Guillaume Viejo'
+version = release = pynapple.__version__
+
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.autosummary',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.viewcode', # Links to source code
+ 'sphinx.ext.doctest',
+ 'sphinx_copybutton', # Adds copy button to code blocks
+ 'sphinx_design', # For layout components
+ 'myst_nb',
+ 'sphinx_contributors'
+ # 'sphinxcontrib.apidoc'
+ # 'sphinx_gallery.gen_gallery',
+ # 'myst_sphinx_gallery',
+]
+
+
+templates_path = ['_templates']
+
+
+# The Root document
+root_doc = "index"
+
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['_build', 'docstrings', 'nextgen', 'Thumbs.db', '.DS_Store']
+
+
+# Generate the API documentation when building
+autosummary_generate = True
+numpydoc_show_class_members = True
+# NOTE: superseded by the second autodoc_default_options assignment below
+# autodoc_default_options = {
+#     'members': True, 'inherited-members': True,
+#     'show-inheritance': True,
+#     }
+
+# apidoc_module_dir = '../pynapple'
+# apidoc_output_dir = 'reference'
+# apidoc_excluded_paths = ['tests']
+# apidoc_separate_modules = True
+
+
+# ----------------------------------------------------------------------------
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_theme = 'pydata_sphinx_theme'
+
+html_logo = "_static/Logo/Pynapple_final_logo.png"
+html_favicon = "_static/Icon/Pynapple_final_icon.png"
+
+
+# Additional theme options
+html_theme_options = {
+ "icon_links": [
+ {
+ "name": "GitHub",
+ "url": "https://github.com/pynapple-org/pynapple",
+ "icon": "fab fa-github",
+ "type": "fontawesome",
+ },
+ {
+ "name": "Twitter",
+ "url": "https://twitter.com/thepynapple",
+ "icon": "fab fa-twitter",
+ "type": "fontawesome",
+ },
+ ],
+ "show_prev_next": True,
+ "header_links_before_dropdown": 5,
+}
+
+html_context = {
+ "default_mode": "light",
+}
+
+html_sidebars = {
+ "index": [],
+ "installing":[],
+ "releases":[],
+ "external":[],
+ "pynajax":[],
+ "citing":[],
+ "**": ["search-field.html", "sidebar-nav-bs.html"],
+}
+
+# # Path for static files (custom stylesheets or JavaScript)
+html_static_path = ['_static']
+html_css_files = ['custom.css']
+
+# Copybutton settings (to hide prompt)
+copybutton_prompt_text = r">>> |\$ |# "
+copybutton_prompt_is_regexp = True
+
+# Enable markdown and notebook support
+myst_enable_extensions = ["colon_fence"] # For improved markdown
+
+# # ----------------------------------------------------------------------------
+# # -- Autodoc and Napoleon Options -------------------------------------------------
+autodoc_default_options = {
+ 'members': True,
+ 'undoc-members': True,
+ 'show-inheritance': True,
+}
+napoleon_numpy_docstring = True
+
+
+
+nitpicky = True
+
+
+
+
+
+
+
diff --git a/doc/examples.md b/doc/examples.md
new file mode 100644
index 00000000..e5acc868
--- /dev/null
+++ b/doc/examples.md
@@ -0,0 +1,53 @@
+Examples
+========
+
+This page contains fully worked examples of data analysis using pynapple
+with neurophysiological datasets.
+
+
+:::{card}
+```{toctree}
+:maxdepth: 3
+Analysing head-direction cells
+```
+:::
+
+
+:::{card}
+```{toctree}
+:maxdepth: 3
+Streaming data from Dandi
+```
+:::
+
+
+:::{card}
+```{toctree}
+:maxdepth: 3
+Computing calcium imaging tuning curves
+```
+:::
+
+
+:::{card}
+```{toctree}
+:maxdepth: 3
+Example perievent
+```
+:::
+
+
+:::{card}
+```{toctree}
+:maxdepth: 3
+Example wavelet decomposition
+```
+:::
+
+
+:::{card}
+```{toctree}
+:maxdepth: 3
+Example filtering
+```
+:::
diff --git a/doc/examples/tutorial_HD_dataset.md b/doc/examples/tutorial_HD_dataset.md
new file mode 100644
index 00000000..ffbe41fd
--- /dev/null
+++ b/doc/examples/tutorial_HD_dataset.md
@@ -0,0 +1,300 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+
+Analysing head-direction cells
+============
+
+This tutorial demonstrates how we use Pynapple to generate Figure 4a in the [publication](https://elifesciences.org/reviewed-preprints/85786).
+The NWB file for the example is hosted on [OSF](https://osf.io/jb2gd). We show below how to stream it.
+The entire dataset can be downloaded [here](https://dandiarchive.org/dandiset/000056).
+
+
+```{code-cell} ipython3
+import numpy as np
+import pandas as pd
+import pynapple as nap
+import scipy.ndimage
+import matplotlib.pyplot as plt
+import seaborn as sns
+import requests, math, os
+import tqdm
+
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+***
+Downloading the data
+------------------
+
+It's a small NWB file.
+
+
+```{code-cell} ipython3
+path = "Mouse32-140822.nwb"
+if path not in os.listdir("."):
+    r = requests.get("https://osf.io/jb2gd/download", stream=True)
+ block_size = 1024*1024
+ with open(path, 'wb') as f:
+ for data in tqdm.tqdm(r.iter_content(block_size), unit='MB', unit_scale=True,
+ total=math.ceil(int(r.headers.get('content-length', 0))//block_size)):
+ f.write(data)
+```
+
+***
+Parsing the data
+------------------
+
+The first step is to load the data and other relevant variables of interest
+
+
+```{code-cell} ipython3
+data = nap.load_file(path) # Load the NWB file for this dataset
+```
+
+What does this look like ?
+
+
+```{code-cell} ipython3
+print(data)
+```
+
+***
+Head-Direction Tuning Curves
+------------------
+
+To plot Head-Direction Tuning curves, we need the spike timings and the orientation of the animal. These quantities are stored in the variables 'units' and 'ry'.
+
+
+```{code-cell} ipython3
+spikes = data["units"] # Get spike timings
+epochs = data["epochs"] # Get the behavioural epochs (in this case, sleep and wakefulness)
+angle = data["ry"] # Get the tracked orientation of the animal
+```
+
+What does this look like ?
+
+
+```{code-cell} ipython3
+print(spikes)
+```
+
+Here, rate is the mean firing rate of the unit. Location indicates the brain region the unit was recorded from, and group refers to the shank number on which the cell was located.
+
+This dataset contains units recorded from the anterior thalamus. Head-direction (HD) cells are found in the anterodorsal nucleus of the thalamus (henceforth referred to as ADn). Units were also recorded from nearby thalamic nuclei in this animal. For the purposes of our tutorial, we are interested in the units recorded in ADn. We can restrict ourselves to analysis of these units rather easily, using Pynapple.
+
+
+```{code-cell} ipython3
+spikes_adn = spikes.getby_category("location")["adn"] # Select only those units that are in ADn
+```
+
+What does this look like ?
+
+
+```{code-cell} ipython3
+print(spikes_adn)
+```
+
+Let's compute some head-direction tuning curves. To do this in Pynapple, all you need is a single line of code!
+
+Plot firing rate of ADn units as a function of heading direction, i.e. a head-direction tuning curve
+
+
+```{code-cell} ipython3
+tuning_curves = nap.compute_1d_tuning_curves(
+ group=spikes_adn,
+ feature=angle,
+ nb_bins=61,
+ ep = epochs['wake'],
+ minmax=(0, 2 * np.pi)
+ )
+```
+
+What does this look like ?
+
+
+```{code-cell} ipython3
+tuning_curves
+```
+
+Each row indicates an angular bin (in radians), and each column corresponds to a single unit. Let's compute the preferred angle quickly as follows:
+
+
+```{code-cell} ipython3
+pref_ang = tuning_curves.idxmax()
+```
+
+For easier visualization, we will colour our plots according to the preferred angle of the cell. To do so, we will normalize the range of angles we have, over a colourmap.
+
+
+```{code-cell} ipython3
+norm = plt.Normalize() # Normalizes data into the range [0,1]
+color = plt.cm.hsv(norm([i / (2 * np.pi) for i in pref_ang.values])) # Assigns a colour in the HSV colourmap for each value of preferred angle
+color = pd.DataFrame(index=pref_ang.index, data = color, columns = ['r', 'g', 'b', 'a'])
+```
+
+To make the tuning curves look nice, we will smooth them before plotting, using this custom function:
+
+
+```{code-cell} ipython3
+from scipy.ndimage import gaussian_filter1d
+def smoothAngularTuningCurves(tuning_curves, sigma=2):
+
+ tmp = np.concatenate((tuning_curves.values, tuning_curves.values, tuning_curves.values))
+ tmp = gaussian_filter1d(tmp, sigma=sigma, axis=0)
+
+ return pd.DataFrame(index = tuning_curves.index,
+ data = tmp[tuning_curves.shape[0]:tuning_curves.shape[0]*2],
+ columns = tuning_curves.columns
+ )
+```
+
+Therefore, we have:
+
+
+```{code-cell} ipython3
+smoothcurves = smoothAngularTuningCurves(tuning_curves, sigma=3)
+```
+
+What does this look like? Let's plot the tuning curves!
+
+
+```{code-cell} ipython3
+plt.figure(figsize=(12, 9))
+for i, n in enumerate(pref_ang.sort_values().index.values):
+ plt.subplot(8, 4, i + 1, projection='polar') # Plot the curves in 8 rows and 4 columns
+ plt.plot(
+ smoothcurves[n], color=color.loc[n]
+ ) # Colour of the curves determined by preferred angle
+ plt.xticks([])
+plt.show()
+```
+
+Awesome!
+
+
++++
+
+***
+Decoding
+------------------
+
+Now that we have HD tuning curves, we can go one step further. Using only the population activity of ADn units, we can decode the direction the animal is looking in. We will then compare this to the real head direction of the animal, and discover that population activity in the ADn indeed codes for HD.
+
+To decode the population activity, we will be using a Bayesian Decoder as implemented in Pynapple. Just a single line of code!
+
+
+```{code-cell} ipython3
+decoded, proba_feature = nap.decode_1d(
+ tuning_curves=tuning_curves,
+ group=spikes_adn,
+ ep=epochs["wake"],
+ bin_size=0.1, # second
+ feature=angle,
+)
+```
+
+What does this look like ?
+
+
+```{code-cell} ipython3
+print(decoded)
+```
+
+The variable 'decoded' indicates the most probable angle in which the animal was looking. There is another variable, 'proba_feature' that denotes the probability of a given angular bin at a given time point. We can look at it below:
+
+
+```{code-cell} ipython3
+print(proba_feature)
+```
+
+Each row is a time bin, and each column is an angular bin. The sum of all values in a row adds up to 1.
+
+Now, let's plot the raster plot for a given period of time, and overlay the actual and decoded HD on the population activity.
+
+
+```{code-cell} ipython3
+ep = nap.IntervalSet(
+ start=10717, end=10730
+) # Select an arbitrary interval for plotting
+
+plt.figure()
+plt.rc("font", size=12)
+for i, n in enumerate(spikes_adn.keys()):
+ plt.plot(
+ spikes[n].restrict(ep).fillna(pref_ang[n]), "|", color=color.loc[n]
+ ) # raster plot for each cell
+plt.plot(
+ decoded.restrict(ep), "--", color="grey", linewidth=2, label="decoded HD"
+) # decoded HD
+plt.legend(loc="upper left")
+plt.xlabel("Time (s)")
+plt.ylabel("Neurons")
+```
+
+From this plot, we can see that the decoder is able to estimate the head-direction based on the population activity in ADn. Amazing!
+
+What does the probability distribution in this example event look like?
+Ideally, the bins with the highest probability will correspond to the bins having the most spikes. Let's plot the probability matrix to visualize this.
+
+
+```{code-cell} ipython3
+smoothed = scipy.ndimage.gaussian_filter(
+ proba_feature, 1
+) # Smoothing the probability distribution
+
+# Create a DataFrame with the smoothed distribution
+p_feature = pd.DataFrame(
+ index=proba_feature.index.values,
+ columns=proba_feature.columns.values,
+ data=smoothed,
+)
+p_feature = nap.TsdFrame(p_feature) # Make it a Pynapple TsdFrame
+
+plt.figure()
+plt.plot(
+ angle.restrict(ep), "w", linewidth=2, label="actual HD", zorder=1
+) # Actual HD, in white
+plt.plot(
+ decoded.restrict(ep), "--", color="grey", linewidth=2, label="decoded HD", zorder=1
+) # Decoded HD, in grey
+
+# Plot the smoothed probability distribution
+plt.imshow(
+ np.transpose(p_feature.restrict(ep).values),
+ aspect="auto",
+ interpolation="bilinear",
+ extent=[ep["start"][0], ep["end"][0], 0, 2 * np.pi],
+ origin="lower",
+ cmap="viridis",
+)
+
+plt.xlabel("Time (s)") # X-axis is time in seconds
+plt.ylabel("Angle (rad)") # Y-axis is the angle in radian
+plt.colorbar(label="probability")
+```
+
+From this probability distribution, we observe that the decoded HD very closely matches the actual HD. Therefore, the population activity in ADn is a reliable estimate of the heading direction of the animal.
+
+I hope this tutorial was helpful. If you have any questions, comments or suggestions, please feel free to reach out to the Pynapple Team!
+
+
+:::{card}
+Authors
+^^^
+Dhruv Mehrotra
+
+Guillaume Viejo
+
+:::
\ No newline at end of file
diff --git a/doc/examples/tutorial_calcium_imaging.md b/doc/examples/tutorial_calcium_imaging.md
new file mode 100644
index 00000000..0a11f05e
--- /dev/null
+++ b/doc/examples/tutorial_calcium_imaging.md
@@ -0,0 +1,211 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3 (ipykernel)
+ language: python
+ name: python3
+---
+
+
+Calcium Imaging
+============
+
+Working with calcium data.
+
+For the example dataset, we will be working with a recording of a freely-moving mouse imaged with a Miniscope (1-photon imaging). The area recorded for this experiment is the postsubiculum - a region that is known to contain head-direction cells, or cells that fire when the animal's head is pointing in a specific direction.
+
+The NWB file for the example is hosted on [OSF](https://osf.io/sbnaw). We show below how to stream it.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+import numpy as np
+import pynapple as nap
+import matplotlib.pyplot as plt
+import seaborn as sns
+import sys, os
+import requests, math
+import tqdm
+
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+***
+Downloading the data
+------------------
+First things first: Let's find our file
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+path = "A0670-221213.nwb"
+if path not in os.listdir("."):
+    r = requests.get("https://osf.io/sbnaw/download", stream=True)
+ block_size = 1024*1024
+ with open(path, 'wb') as f:
+ for data in tqdm.tqdm(r.iter_content(block_size), unit='MB', unit_scale=True,
+ total=math.ceil(int(r.headers.get('content-length', 0))//block_size)):
+ f.write(data)
+```
+
+***
+Parsing the data
+------------------
+Now that we have the file, let's load the data
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+data = nap.load_file(path)
+print(data)
+```
+
+Let's save the RoiResponseSeries as a variable called 'transients' and print it
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+transients = data['RoiResponseSeries']
+print(transients)
+```
+
+***
+Plotting the activity of one neuron
+-----------------------------------
+Our transients are saved as a (35757, 65) TsdFrame. Looking at the printed object, you can see that we have 35757 data points for each of our 65 regions of interest. We want to see which of these are head-direction cells, so we need to plot a tuning curve of fluorescence vs head-direction of the animal.
+
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+plt.figure(figsize=(6, 2))
+plt.plot(transients[0:2000,0], linewidth=5)
+plt.xlabel("Time (s)")
+plt.ylabel("Fluorescence")
+plt.show()
+```
+
+Here we extract the head-direction as a variable called angle
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+angle = data['ry']
+print(angle)
+```
+
+As you can see, we have a longer recording for our tracking of the animal's head than we do for our calcium imaging - something to keep in mind.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+print(transients.time_support)
+print(angle.time_support)
+```
+
+***
+Calcium tuning curves
+---------------------
+Here we compute the tuning curves of all the neurons
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+tcurves = nap.compute_1d_tuning_curves_continuous(transients, angle, nb_bins = 120)
+
+print(tcurves)
+```
+
+We now have a DataFrame, where our index is the angle of the animal's head in radians, and each column represents the tuning curve of each region of interest. We can plot one neuron.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+plt.figure()
+plt.plot(tcurves[4])
+plt.xlabel("Angle")
+plt.ylabel("Fluorescence")
+plt.show()
+```
+
+It looks like this could be a head-direction cell. One important property of head-direction cells however, is that their firing with respect to head-direction is stable. To check for their stability, we can split our recording in two and compute a tuning curve for each half of the recording.
+
+We start by finding the midpoint of the recording, using the function `get_intervals_center`. Using this, we then create one new IntervalSet with two rows, one for each half of the recording.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+center = transients.time_support.get_intervals_center()
+
+halves = nap.IntervalSet(
+ start = [transients.time_support.start[0], center.t[0]],
+ end = [center.t[0], transients.time_support.end[0]]
+ )
+```
+
+Now we can compute the tuning curves for each half of the recording and plot the tuning curves for the fifth region of interest.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+half1 = nap.compute_1d_tuning_curves_continuous(transients, angle, nb_bins = 120, ep = halves.loc[[0]])
+half2 = nap.compute_1d_tuning_curves_continuous(transients, angle, nb_bins = 120, ep = halves.loc[[1]])
+
+plt.figure(figsize=(12, 5))
+plt.subplot(1,2,1)
+plt.plot(half1[4])
+plt.title("First half")
+plt.xlabel("Angle")
+plt.ylabel("Fluorescence")
+plt.subplot(1,2,2)
+plt.plot(half2[4])
+plt.title("Second half")
+plt.xlabel("Angle")
+plt.show()
+```
+
+:::{card}
+Authors
+^^^
+Sofia Skromne Carrasco
+
+:::
diff --git a/doc/examples/tutorial_human_dataset.md b/doc/examples/tutorial_human_dataset.md
new file mode 100644
index 00000000..46b6b8b3
--- /dev/null
+++ b/doc/examples/tutorial_human_dataset.md
@@ -0,0 +1,299 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+
+Computing perievent
+============
+
+This tutorial demonstrates how we use Pynapple on various publicly available datasets in systems neuroscience to streamline analysis. In this tutorial, we will examine the dataset from [Zheng et al (2022)](https://www.nature.com/articles/s41593-022-01020-w), which was used to generate Figure 4c in the [publication](https://elifesciences.org/reviewed-preprints/85786).
+
+The NWB file for the example used here is provided in [this](https://github.com/PeyracheLab/pynacollada/tree/main/pynacollada/Pynapple%20Paper%20Figures/Zheng%202022/000207/sub-4) repository. The entire dataset can be downloaded [here](https://dandiarchive.org/dandiset/000207/0.220216.0323).
+
+
+```{code-cell} ipython3
+import matplotlib.pyplot as plt
+import numpy as np
+import pynapple as nap
+import seaborn as sns
+```
+
+***
+Stream the data from DANDI
+------------------
+
+
+```{code-cell} ipython3
+from pynwb import NWBHDF5IO
+
+from dandi.dandiapi import DandiAPIClient
+import fsspec
+from fsspec.implementations.cached import CachingFileSystem
+import h5py
+
+# Enter the session ID and path to the file
+dandiset_id, filepath = ("000207", "sub-4/sub-4_ses-4_ecephys.nwb")
+
+with DandiAPIClient() as client:
+ asset = client.get_dandiset(dandiset_id, "draft").get_asset_by_path(filepath)
+ s3_url = asset.get_content_url(follow_redirects=1, strip_query=True)
+
+# first, create a virtual filesystem based on the http protocol
+fs = fsspec.filesystem("http")
+
+# create a cache to save downloaded data to disk (optional)
+fs = CachingFileSystem(
+ fs=fs,
+ cache_storage="nwb-cache", # Local folder for the cache
+)
+
+# next, open the file
+file = h5py.File(fs.open(s3_url, "rb"))
+io = NWBHDF5IO(file=file, load_namespaces=True)
+```
+
+***
+Parsing the data
+------------------
+
+The first step is to load the data from the Neurodata Without Borders (NWB) file. This is done as follows:
+
+
+
+```{code-cell} ipython3
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+
+data = nap.NWBFile(io.read()) # Load the NWB file for this dataset
+
+# What does this look like?
+print(data)
+```
+
+Get spike timings
+
+
+```{code-cell} ipython3
+spikes = data["units"]
+```
+
+What does this look like?
+
+
+```{code-cell} ipython3
+print(spikes)
+```
+
+This TsGroup has, among other information, the mean firing rate of the unit, the X, Y and Z coordinates, the brain region the unit was recorded from, and the channel number on which the unit was located.
+
+
++++
+
+Next, let's get the encoding table of all stimulus times, as shown below:
+
+
+```{code-cell} ipython3
+encoding_table = data["encoding_table"]
+
+# What does this look like?
+print(encoding_table)
+```
+
+This table has, among other things, the scene boundary times for which we will plot the peri-event time histogram (PETH).
+
+There are 3 types of scene boundaries in this data. For the purposes of demonstration, we will use only the "No boundary" (NB) and the "Hard boundary" (HB) conditions. The encoding table has a stimCategory field, which tells us the type of boundary corresponding to a given trial.
+
+
+```{code-cell} ipython3
+stimCategory = np.array(
+ encoding_table.stimCategory
+) # Get the scene boundary type for all trials
+
+# What does this look like?
+print(stimCategory)
+```
+
+Trials marked 0 correspond to NB, while trials marked 2 correspond to HB. Let's extract the trial numbers for NB and HB trials, as shown below:
+
+
+```{code-cell} ipython3
+indxNB = np.where(stimCategory == 0) # NB trial indices
+indxHB = np.where(stimCategory == 2) # HB trial indices
+```
+
+The encoding table also has 3 types of boundary times. For the purposes of our demonstration, we will focus on boundary1 times, and extract them as shown below:
+
+
+```{code-cell} ipython3
+boundary1_time = np.array(encoding_table.boundary1_time) # Get timings of Boundary1
+
+# What does this look like?
+print(boundary1_time)
+```
+
+This contains the timings of all boundaries in this block of trials. Note that we also have the type of boundary for each trial. Let's store the NB and HB boundary timings in separate variables, as Pynapple Ts objects:
+
+
+```{code-cell} ipython3
+NB = nap.Ts(boundary1_time[indxNB]) # NB timings
+HB = nap.Ts(boundary1_time[indxHB]) # HB timings
+```
+
+***
+Peri-Event Time Histogram (PETH)
+------------------
+
+A PETH is a plot where we align a variable of interest (for example, spikes) to an external event (in this case, to boundary times). This visualization helps us infer relationships between the two.
+
+For our demonstration, we will align the spikes of the first unit, which is located in the hippocampus, to the times of NB and HB. You can do a quick check to verify that the first unit is indeed located in the hippocampus; we leave it to you.
+
+With Pynapple, PETHs can be computed with a single line of code!
+
+
+```{code-cell} ipython3
+NB_peth = nap.compute_perievent(
+ spikes[0], NB, minmax=(-0.5, 1)
+) # Compute PETH of unit aligned to NB, for -0.5 to 1s windows
+HB_peth = nap.compute_perievent(
+ spikes[0], HB, minmax=(-0.5, 1)
+) # Compute PETH of unit aligned to HB, for -0.5 to 1s windows
+```
+
+Let's plot the PETH
+
+
+```{code-cell} ipython3
+plt.figure(figsize =(15,8))
+plt.subplot(211) # Plot the figures in 2 rows
+plt.plot(NB_peth.to_tsd(), "o",
+ color=[102 / 255, 204 / 255, 0 / 255],
+ markersize=4)
+plt.axvline(0, linewidth=2, color="k", linestyle="--") # Plot a line at t = 0
+plt.yticks([0, 30]) # Set ticks on Y-axis
+plt.gca().set_yticklabels(["1", "30"]) # Label the ticks
+plt.xlabel("Time from NB (s)") # Time from boundary in seconds, on X-axis
+plt.ylabel("Trial Number") # Trial number on Y-axis
+
+plt.subplot(212)
+plt.plot(HB_peth.to_tsd(), "o",
+ color=[255 / 255, 99 / 255, 71 / 255],
+ markersize=4) # Plot PETH
+plt.axvline(0, linewidth=2, color="k", linestyle="--") # Plot a line at t = 0
+plt.yticks([0, 30]) # Set ticks on Y-axis
+plt.gca().set_yticklabels(["1", "30"]) # Label the ticks
+plt.xlabel("Time from HB (s)") # Time from boundary in seconds, on X-axis
+plt.ylabel("Trial Number") # Trial number on Y-axis
+plt.subplots_adjust(wspace=0.2, hspace=0.5, top=0.85)
+```
+
+Awesome! From the PETH, we can see that this neuron fires after boundary onset in HB trials. This is an example of what the authors describe [here](https://www.nature.com/articles/s41593-022-01020-w) as a boundary cell.
+
+
++++
+
+***
+PETH of firing rate for NB and HB cells
+------------------
+
+Now that we have the PETH of spiking, we can go one step further. We will plot the mean firing rate of this cell aligned to the boundary for each trial type. Doing this in Pynapple is very simple!
+
+Use Pynapple to compute binned spike counts
+
+
+```{code-cell} ipython3
+bin_size = 0.01
+counts_NB = NB_peth.count(bin_size) # Spike counts binned in 10ms steps, for NB trials
+counts_HB = HB_peth.count(bin_size) # Spike counts binned in 10ms steps, for HB trials
+```
+
+Compute firing rate for both trial types
+
+
+```{code-cell} ipython3
+fr_NB = counts_NB / bin_size
+fr_HB = counts_HB / bin_size
+```
+
+Smooth the firing rate with a gaussian window with std=4*bin_size
+
+
+```{code-cell} ipython3
+counts_NB = counts_NB.smooth(bin_size*4)
+counts_HB = counts_HB.smooth(bin_size*4)
+```
+
+Compute the mean firing rate for both trial types
+
+
+```{code-cell} ipython3
+meanfr_NB = fr_NB.mean(axis=1)
+meanfr_HB = fr_HB.mean(axis=1)
+```
+
+Compute standard error of mean (SEM) of the firing rate for both trial types
+
+
+```{code-cell} ipython3
+from scipy.stats import sem
+error_NB = sem(fr_NB, axis=1)
+error_HB = sem(fr_HB, axis=1)
+```
+
+Plot the mean +/- SEM of firing rate for both trial types
+
+
+```{code-cell} ipython3
+plt.figure(figsize =(15,8))
+plt.plot(
+ meanfr_NB, color=[102 / 255, 204 / 255, 0 / 255], label="NB"
+) # Plot mean firing rate for NB trials
+
+# Plot SEM for NB trials
+plt.fill_between(
+ meanfr_NB.index.values,
+ meanfr_NB.values - error_NB,
+ meanfr_NB.values + error_NB,
+ color=[102 / 255, 204 / 255, 0 / 255],
+ alpha=0.2,
+)
+
+plt.plot(
+ meanfr_HB, color=[255 / 255, 99 / 255, 71 / 255], label="HB"
+) # Plot mean firing rate for HB trials
+
+# Plot SEM for NB trials
+plt.fill_between(
+ meanfr_HB.index.values,
+ meanfr_HB.values - error_HB,
+ meanfr_HB.values + error_HB,
+ color=[255 / 255, 99 / 255, 71 / 255],
+ alpha=0.2,
+)
+
+plt.axvline(0, linewidth=2, color="k", linestyle="--") # Plot a line at t = 0
+plt.xlabel("Time from boundary (s)") # Time from boundary in seconds, on X-axis
+plt.ylabel("Firing rate (Hz)") # Firing rate in Hz on Y-axis
+plt.legend(loc="upper right")
+```
+
+This plot verifies what we visualized in the PETH rasters above, that this cell responds to a hard boundary. Hence, it is a boundary cell. To learn more about these cells, please check out the original study [here](https://www.nature.com/articles/s41593-022-01020-w).
+
+I hope this tutorial was helpful. If you have any questions, comments or suggestions, please feel free to reach out to the Pynapple Team!
+
+:::{card}
+Authors
+^^^
+Dhruv Mehrotra
+
+Guillaume Viejo
+
+:::
\ No newline at end of file
diff --git a/doc/examples/tutorial_phase_preferences.md b/doc/examples/tutorial_phase_preferences.md
new file mode 100644
index 00000000..c98f9748
--- /dev/null
+++ b/doc/examples/tutorial_phase_preferences.md
@@ -0,0 +1,283 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+
+Spikes-phase coupling
+=====================
+
+In this tutorial we will learn how to isolate phase information using band-pass filtering and combine it
+with spiking data, to find phase preferences of spiking units.
+
+Specifically, we will examine LFP and spiking data from a period of REM sleep, after traversal of a linear track.
+
+
+```{code-cell} ipython3
+import math
+import os
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import requests
+import scipy
+import seaborn as sns
+import tqdm
+import pynapple as nap
+
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+
+```
+
+***
+Downloading the data
+------------------
+Let's download the data and save it locally
+
+
+```{code-cell} ipython3
+path = "Achilles_10252013_EEG.nwb"
+if path not in os.listdir("."):
+ r = requests.get(f"https://osf.io/2dfvp/download", stream=True)
+ block_size = 1024 * 1024
+ with open(path, "wb") as f:
+ for data in tqdm.tqdm(
+ r.iter_content(block_size),
+ unit="MB",
+ unit_scale=True,
+ total=math.ceil(int(r.headers.get("content-length", 0)) // block_size),
+ ):
+ f.write(data)
+```
+
+***
+Loading the data
+------------------
+Let's load and print the full dataset.
+
+
+```{code-cell} ipython3
+data = nap.load_file(path)
+FS = 1250 # We know from the methods of the paper
+print(data)
+```
+
+***
+Selecting slices
+-----------------------------------
+For later visualization, we define an interval of 3 seconds of data during REM sleep.
+
+
+```{code-cell} ipython3
+ep_ex_rem = nap.IntervalSet(
+ data["rem"]["start"][0] + 97.0,
+ data["rem"]["start"][0] + 100.0,
+)
+```
+
+Here we restrict the lfp to the REM epochs.
+
+
+```{code-cell} ipython3
+tsd_rem = data["eeg"][:,0].restrict(data["rem"])
+
+# We will also extract spike times from all units in our dataset
+# which occur during REM sleep
+spikes = data["units"].restrict(data["rem"])
+```
+
+***
+Plotting the LFP Activity
+-----------------------------------
+We should first plot our REM Local Field Potential data.
+
+
+```{code-cell} ipython3
+fig, ax = plt.subplots(1, constrained_layout=True, figsize=(10, 3))
+ax.plot(tsd_rem.restrict(ep_ex_rem))
+ax.set_title("REM Local Field Potential")
+ax.set_ylabel("LFP (a.u.)")
+ax.set_xlabel("time (s)")
+```
+
+***
+Getting the Wavelet Decomposition
+-----------------------------------
+As we would expect, it looks like we have a very strong theta oscillation within our data
+- this is a common feature of REM sleep. Let's perform a wavelet decomposition,
+as we did in the last tutorial, to get a more informative breakdown of the
+frequencies present in the data.
+
+We must define the frequency set that we'd like to use for our decomposition.
+
+
+```{code-cell} ipython3
+freqs = np.geomspace(5, 200, 25)
+```
+
+We compute the wavelet transform on our LFP data (only during the example interval).
+
+
+```{code-cell} ipython3
+cwt_rem = nap.compute_wavelet_transform(tsd_rem.restrict(ep_ex_rem), fs=FS, freqs=freqs)
+```
+
+***
+Now let's plot the calculated wavelet scalogram.
+
+
+```{code-cell} ipython3
+# Define wavelet decomposition plotting function
+def plot_timefrequency(freqs, powers, ax=None):
+ im = ax.imshow(np.abs(powers), aspect="auto")
+ ax.invert_yaxis()
+ ax.set_xlabel("Time (s)")
+ ax.set_ylabel("Frequency (Hz)")
+ ax.get_xaxis().set_visible(False)
+ ax.set(yticks=np.arange(len(freqs))[::2], yticklabels=np.rint(freqs[::2]))
+ ax.grid(False)
+ return im
+
+fig = plt.figure(constrained_layout=True, figsize=(10, 6))
+fig.suptitle("Wavelet Decomposition")
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 0.3])
+
+ax0 = plt.subplot(gs[0, 0])
+im = plot_timefrequency(freqs, np.transpose(cwt_rem[:, :].values), ax=ax0)
+cbar = fig.colorbar(im, ax=ax0, orientation="vertical")
+
+ax1 = plt.subplot(gs[1, 0])
+ax1.plot(tsd_rem.restrict(ep_ex_rem))
+ax1.set_ylabel("LFP (a.u.)")
+ax1.set_xlabel("Time (s)")
+ax1.margins(0)
+```
+
+***
+Filtering Theta
+---------------
+
+As expected, there is a strong 8Hz component during REM sleep. We can filter it using the function `nap.apply_bandpass_filter`.
+
+
+```{code-cell} ipython3
+theta_band = nap.apply_bandpass_filter(tsd_rem, cutoff=(6.0, 10.0), fs=FS)
+```
+
+We can plot the original signal and the filtered signal.
+
+
+```{code-cell} ipython3
+plt.figure(constrained_layout=True, figsize=(12, 3))
+plt.plot(tsd_rem.restrict(ep_ex_rem), alpha=0.5)
+plt.plot(theta_band.restrict(ep_ex_rem))
+plt.xlabel("Time (s)")
+plt.show()
+```
+
+***
+Computing phase
+---------------
+
+From the filtered signal, it is easy to get the phase using the Hilbert transform. Here we use scipy Hilbert method.
+
+
+```{code-cell} ipython3
+from scipy import signal
+
+theta_phase = nap.Tsd(t=theta_band.t, d=np.angle(signal.hilbert(theta_band)))
+```
+
+Let's plot the phase.
+
+
+```{code-cell} ipython3
+plt.figure(constrained_layout=True, figsize=(12, 3))
+plt.subplot(211)
+plt.plot(tsd_rem.restrict(ep_ex_rem), alpha=0.5)
+plt.plot(theta_band.restrict(ep_ex_rem))
+plt.subplot(212)
+plt.plot(theta_phase.restrict(ep_ex_rem), color='r')
+plt.ylabel("Phase (rad)")
+plt.xlabel("Time (s)")
+plt.show()
+```
+
+***
+Finding Phase of Spikes
+-----------------------
+Now that we have the phase of our theta wavelet, and our spike times, we can find the phase firing preferences
+of each of the units using the `compute_1d_tuning_curves` function.
+
+We will start by throwing away cells which do not have a high enough firing rate during our interval.
+
+
+```{code-cell} ipython3
+spikes = spikes[spikes.rate > 5.0]
+```
+
+The feature is the theta phase during REM sleep.
+
+
+```{code-cell} ipython3
+phase_modulation = nap.compute_1d_tuning_curves(
+ group=spikes, feature=theta_phase, nb_bins=61, minmax=(-np.pi, np.pi)
+)
+```
+
+Let's plot the first 3 neurons.
+
+
+```{code-cell} ipython3
+plt.figure(constrained_layout=True, figsize = (12, 3))
+for i in range(3):
+ plt.subplot(1,3,i+1)
+ plt.plot(phase_modulation.iloc[:,i])
+ plt.xlabel("Phase (rad)")
+ plt.ylabel("Firing rate (Hz)")
+plt.show()
+```
+
+There is clearly a strong modulation for the third neuron.
+Finally, we can use the function `value_from` to align each spike to the corresponding phase position and overlay
+it with the LFP.
+
+
+```{code-cell} ipython3
+spike_phase = spikes[spikes.index[3]].value_from(theta_phase)
+```
+
+Let's plot it.
+
+
+```{code-cell} ipython3
+plt.figure(constrained_layout=True, figsize=(12, 3))
+plt.subplot(211)
+plt.plot(tsd_rem.restrict(ep_ex_rem), alpha=0.5)
+plt.plot(theta_band.restrict(ep_ex_rem))
+plt.subplot(212)
+plt.plot(theta_phase.restrict(ep_ex_rem), alpha=0.5)
+plt.plot(spike_phase.restrict(ep_ex_rem), 'o')
+plt.ylabel("Phase (rad)")
+plt.xlabel("Time (s)")
+plt.show()
+```
+
+:::{card}
+Authors
+^^^
+Kipp Freud (https://kippfreud.com/)
+
+Guillaume Viejo
+
+:::
\ No newline at end of file
diff --git a/doc/examples/tutorial_pynapple_dandi.md b/doc/examples/tutorial_pynapple_dandi.md
new file mode 100644
index 00000000..8c4b0c99
--- /dev/null
+++ b/doc/examples/tutorial_pynapple_dandi.md
@@ -0,0 +1,150 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+
+Streaming data from DANDI
+=========================
+
+This script shows how to stream data from the [DANDI Archive](https://dandiarchive.org/) all the way to pynapple.
+
+***
+Prelude
+-------
+
+The data used in this tutorial were used in this publication:
+__Sargolini, Francesca, et al. "Conjunctive representation of position, direction, and velocity in entorhinal cortex." Science 312.5774 (2006): 758-762.__
+The data can be found on the DANDI Archive in [Dandiset 000582](https://dandiarchive.org/dandiset/000582).
+
+***
+DANDI
+-----
+DANDI allows you to stream data without downloading all the files. In this case the data extracted from the NWB file are stored in the nwb-cache folder.
+
+
+```{code-cell} ipython3
+from pynwb import NWBHDF5IO
+
+from dandi.dandiapi import DandiAPIClient
+import fsspec
+from fsspec.implementations.cached import CachingFileSystem
+import h5py
+
+
+# ecephys
+dandiset_id, filepath = (
+ "000582",
+ "sub-10073/sub-10073_ses-17010302_behavior+ecephys.nwb",
+)
+
+
+with DandiAPIClient() as client:
+ asset = client.get_dandiset(dandiset_id, "draft").get_asset_by_path(filepath)
+ s3_url = asset.get_content_url(follow_redirects=1, strip_query=True)
+
+# first, create a virtual filesystem based on the http protocol
+fs = fsspec.filesystem("http")
+
+# create a cache to save downloaded data to disk (optional)
+fs = CachingFileSystem(
+ fs=fs,
+ cache_storage="nwb-cache", # Local folder for the cache
+)
+
+# next, open the file
+file = h5py.File(fs.open(s3_url, "rb"))
+io = NWBHDF5IO(file=file, load_namespaces=True)
+
+print(io)
+```
+
+Pynapple
+--------
+If opening the NWB works, you can start streaming data straight into pynapple with the `NWBFile` class.
+
+
+```{code-cell} ipython3
+import pynapple as nap
+import matplotlib.pyplot as plt
+import seaborn as sns
+import numpy as np
+
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+
+nwb = nap.NWBFile(io.read())
+
+print(nwb)
+```
+
+We can load the spikes as a TsGroup for inspection.
+
+
+```{code-cell} ipython3
+units = nwb["units"]
+
+print(units)
+```
+
+As well as the position
+
+
+```{code-cell} ipython3
+position = nwb["SpatialSeriesLED1"]
+```
+
+Here we compute the 2d tuning curves
+
+
+```{code-cell} ipython3
+tc, binsxy = nap.compute_2d_tuning_curves(units, position, 20)
+```
+
+Let's plot the tuning curves
+
+
+```{code-cell} ipython3
+plt.figure(figsize=(15, 7))
+for i in tc.keys():
+ plt.subplot(2, 4, i + 1)
+ plt.imshow(tc[i], origin="lower", aspect="auto")
+ plt.title("Unit {}".format(i))
+plt.tight_layout()
+plt.show()
+```
+
+Let's plot the spikes of unit 1, which has a nice grid.
+Here I use the function `value_from` to assign to each spike the closest position in time.
+
+
+```{code-cell} ipython3
+plt.figure(figsize=(15, 6))
+plt.subplot(121)
+extent = (
+ np.min(position["x"]),
+ np.max(position["x"]),
+ np.min(position["y"]),
+ np.max(position["y"]),
+)
+plt.imshow(tc[1], origin="lower", extent=extent, aspect="auto")
+plt.xlabel("x")
+plt.ylabel("y")
+
+plt.subplot(122)
+plt.plot(position["y"], position["x"], color="grey")
+spk_pos = units[1].value_from(position)
+plt.plot(spk_pos["y"], spk_pos["x"], "o", color="red", markersize=5, alpha=0.5)
+plt.xlabel("x")
+plt.ylabel("y")
+plt.tight_layout()
+plt.show()
+```
diff --git a/doc/examples/tutorial_wavelet_decomposition.md b/doc/examples/tutorial_wavelet_decomposition.md
new file mode 100644
index 00000000..c16c7b06
--- /dev/null
+++ b/doc/examples/tutorial_wavelet_decomposition.md
@@ -0,0 +1,539 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3 (ipykernel)
+ language: python
+ name: python3
+---
+
+
+Wavelet Transform
+============
+This tutorial demonstrates how we can use the signal processing tools within Pynapple to aid with data analysis.
+We will examine the dataset from [Grosmark & Buzsáki (2016)](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4919122/).
+
+Specifically, we will examine Local Field Potential data from a period of active traversal of a linear track.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+import math
+import os
+
+import matplotlib.colors as mcolors
+import matplotlib.pyplot as plt
+import numpy as np
+import requests
+import seaborn as sns
+import tqdm
+import pynapple as nap
+
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+
+```
+
+***
+Downloading the data
+------------------
+Let's download the data and save it locally
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+path = "Achilles_10252013_EEG.nwb"
+if path not in os.listdir("."):
+ r = requests.get(f"https://osf.io/2dfvp/download", stream=True)
+ block_size = 1024 * 1024
+ with open(path, "wb") as f:
+ for data in tqdm.tqdm(
+ r.iter_content(block_size),
+ unit="MB",
+ unit_scale=True,
+ total=math.ceil(int(r.headers.get("content-length", 0)) // block_size),
+ ):
+ f.write(data)
+# Let's load and print the full dataset.
+data = nap.load_file(path)
+print(data)
+```
+
+First we can extract the data from the NWB. The local field potential has been downsampled to 1250Hz. We will call it `eeg`.
+
+The `time_support` of the object `data['position']` contains the interval for which the rat was running along the linear track. We will call it `wake_ep`.
+
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+FS = 1250
+
+eeg = data["eeg"]
+
+wake_ep = data["position"].time_support
+```
+
+***
+Selecting example
+-----------------------------------
+We will consider a single run of the experiment - where the rodent completes a full traversal of the linear track,
+followed by 4 seconds of post-traversal activity.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+forward_ep = data["forward_ep"]
+RUN_interval = nap.IntervalSet(forward_ep.start[7], forward_ep.end[7] + 4.0)
+
+eeg_example = eeg.restrict(RUN_interval)[:, 0]
+pos_example = data["position"].restrict(RUN_interval)
+```
+
+***
+Plotting the LFP and Behavioural Activity
+-----------------------------------
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+fig = plt.figure(constrained_layout=True, figsize=(10, 6))
+axd = fig.subplot_mosaic(
+ [["ephys"], ["pos"]],
+ height_ratios=[1, 0.4],
+)
+axd["ephys"].plot(eeg_example, label="CA1")
+axd["ephys"].set_title("EEG (1250 Hz)")
+axd["ephys"].set_ylabel("LFP (a.u.)")
+axd["ephys"].set_xlabel("time (s)")
+axd["ephys"].margins(0)
+axd["ephys"].legend()
+axd["pos"].plot(pos_example, color="black")
+axd["pos"].margins(0)
+axd["pos"].set_xlabel("time (s)")
+axd["pos"].set_ylabel("Linearized Position")
+axd["pos"].set_xlim(RUN_interval[0, 0], RUN_interval[0, 1])
+```
+
+In the top panel, we can see the lfp trace as a function of time, and on the bottom the mouse position on the linear
+track as a function of time. Position 0 and 1 correspond to the start and end of the trial respectively.
+
+
++++
+
+***
+Getting the LFP Spectrogram
+-----------------------------------
+Let's take the Fourier transform of our data to get an initial insight into the dominant frequencies during exploration (`wake_ep`).
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+power = nap.compute_power_spectral_density(eeg, fs=FS, ep=wake_ep, norm=True)
+
+print(power)
+```
+
+***
+The returned object is a pandas dataframe which uses frequencies as indexes and spectral power as values.
+
+Let's plot the power between 1 and 100 Hz.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+fig, ax = plt.subplots(1, constrained_layout=True, figsize=(10, 4))
+ax.plot(
+ np.abs(power[(power.index >= 1.0) & (power.index <= 100)]),
+ alpha=0.5,
+ label="LFP Frequency Power",
+)
+ax.axvspan(6, 10, color="red", alpha=0.1)
+ax.set_xlabel("Freq (Hz)")
+ax.set_ylabel("Frequency Power")
+ax.set_title("LFP Fourier Decomposition")
+ax.legend()
+```
+
+The red area outlines the theta rhythm (6-10 Hz), which is prominent in hippocampal LFP.
+Hippocampal theta rhythm appears mostly when the animal is running [1].
+We can check it here by separating the wake epochs (`wake_ep`) into run epochs (`run_ep`) and rest epochs (`rest_ep`).
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+# The run epoch is the portion of the data for which we have position data
+run_ep = data["position"].dropna().find_support(1)
+# The rest epoch is the data at all points where we do not have position data
+rest_ep = wake_ep.set_diff(run_ep)
+```
+
+`run_ep` and `rest_ep` are IntervalSets with discontinuous epochs.
+
+The function `nap.compute_power_spectral_density` takes a signal with a single epoch to avoid artefacts from jumps between epochs.
+
+To compare `run_ep` with `rest_ep`, we can use the function `nap.compute_mean_power_spectral_density` which averages the FFT over multiple epochs of the same duration. The parameter `interval_size` controls the duration of those epochs.
+
+In this case, `interval_size` is equal to 1.5 seconds.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+power_run = nap.compute_mean_power_spectral_density(
+ eeg, 1.5, fs=FS, ep=run_ep, norm=True
+)
+power_rest = nap.compute_mean_power_spectral_density(
+ eeg, 1.5, fs=FS, ep=rest_ep, norm=True
+)
+```
+
+`power_run` and `power_rest` are the power spectral density when the animal is respectively running and resting.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+fig, ax = plt.subplots(1, constrained_layout=True, figsize=(10, 4))
+ax.plot(
+ np.abs(power_run[(power_run.index >= 3.0) & (power_run.index <= 30)]),
+ alpha=1,
+ label="Run",
+ linewidth=2,
+)
+ax.plot(
+ np.abs(power_rest[(power_rest.index >= 3.0) & (power_rest.index <= 30)]),
+ alpha=1,
+ label="Rest",
+ linewidth=2,
+)
+ax.axvspan(6, 10, color="red", alpha=0.1)
+ax.set_xlabel("Freq (Hz)")
+ax.set_ylabel("Frequency Power")
+ax.set_title("LFP Fourier Decomposition")
+ax.legend()
+```
+
+***
+Getting the Wavelet Decomposition
+-----------------------------------
+Overall, the prominent frequencies in the data vary over time. The LFP characteristics may be different when the animal is running along the track, and when it is finished.
+Let's generate a wavelet decomposition to look more closely at the changing frequency powers over time.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+# We must define the frequency set that we'd like to use for our decomposition
+freqs = np.geomspace(3, 250, 100)
+```
+
+Compute and print the wavelet transform on our LFP data
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+mwt_RUN = nap.compute_wavelet_transform(eeg_example, fs=FS, freqs=freqs)
+```
+
+`mwt_RUN` is a TsdFrame with each column being the convolution with one wavelet at a particular frequency.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+print(mwt_RUN)
+```
+
+***
+Now let's plot it.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+fig = plt.figure(constrained_layout=True, figsize=(10, 6))
+gs = plt.GridSpec(3, 1, figure=fig, height_ratios=[1.0, 0.5, 0.1])
+
+ax0 = plt.subplot(gs[0, 0])
+pcmesh = ax0.pcolormesh(mwt_RUN.t, freqs, np.transpose(np.abs(mwt_RUN)))
+ax0.grid(False)
+ax0.set_yscale("log")
+ax0.set_title("Wavelet Decomposition")
+ax0.set_ylabel("Frequency (Hz)")
+cbar = plt.colorbar(pcmesh, ax=ax0, orientation="vertical")
+ax0.set_ylabel("Amplitude")
+
+ax1 = plt.subplot(gs[1, 0], sharex=ax0)
+ax1.plot(eeg_example)
+ax1.set_ylabel("LFP (a.u.)")
+
+ax1 = plt.subplot(gs[2, 0], sharex=ax0)
+ax1.plot(pos_example, color="black")
+ax1.set_xlabel("Time (s)")
+ax1.set_ylabel("Pos.")
+```
+
+***
+Visualizing Theta Band Power
+-----------------------------------
+There seems to be a strong theta frequency present in the data during the maze traversal.
+Let's plot the estimated 6-10Hz component of the wavelet decomposition on top of our data, and see how well they match up.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+theta_freq_index = np.logical_and(freqs > 6, freqs < 10)
+
+
+# Extract its real component, as well as its power envelope
+theta_band_reconstruction = np.mean(mwt_RUN[:, theta_freq_index], 1)
+theta_band_power_envelope = np.abs(theta_band_reconstruction)
+```
+
+***
+Now let's visualise the theta band component of the signal over time.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+fig = plt.figure(constrained_layout=True, figsize=(10, 6))
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 0.9])
+ax0 = plt.subplot(gs[0, 0])
+ax0.plot(eeg_example, label="CA1")
+ax0.set_title("EEG (1250 Hz)")
+ax0.set_ylabel("LFP (a.u.)")
+ax0.set_xlabel("time (s)")
+ax0.legend()
+ax1 = plt.subplot(gs[1, 0])
+ax1.plot(np.real(theta_band_reconstruction), label="6-10 Hz oscillations")
+ax1.plot(theta_band_power_envelope, label="6-10 Hz power envelope")
+ax1.set_xlabel("time (s)")
+ax1.set_ylabel("Wavelet transform")
+ax1.legend()
+```
+
+***
+We observe that the theta power is far stronger during the first 4 seconds of the dataset, during which the rat
+is traversing the linear track.
+
+
++++
+
+***
+Visualizing High Frequency Oscillation
+-----------------------------------
+There also seem to be peaks in the 200Hz frequency power after traversal of the maze is complete. Here we use the interval (18356, 18357.5) seconds to zoom in.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+zoom_ep = nap.IntervalSet(18356.0, 18357.5)
+
+mwt_zoom = mwt_RUN.restrict(zoom_ep)
+
+fig = plt.figure(constrained_layout=True, figsize=(10, 6))
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 0.5])
+ax0 = plt.subplot(gs[0, 0])
+pcmesh = ax0.pcolormesh(mwt_zoom.t, freqs, np.transpose(np.abs(mwt_zoom)))
+ax0.grid(False)
+ax0.set_yscale("log")
+ax0.set_title("Wavelet Decomposition")
+ax0.set_ylabel("Frequency (Hz)")
+cbar = plt.colorbar(pcmesh, ax=ax0, orientation="vertical")
+ax0.set_label("Amplitude")
+
+ax1 = plt.subplot(gs[1, 0], sharex=ax0)
+ax1.plot(eeg_example.restrict(zoom_ep))
+ax1.set_ylabel("LFP (a.u.)")
+ax1.set_xlabel("Time (s)")
+```
+
+Those events are called Sharp-waves ripples [2].
+
+Among other methods, we can use the Wavelet decomposition to isolate them. In this case, we will look at the power of the wavelets for frequencies between 150 to 250 Hz.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+ripple_freq_index = np.logical_and(freqs > 150, freqs < 250)
+```
+
+We can compute the mean power for this frequency band.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+ripple_power = np.mean(np.abs(mwt_RUN[:, ripple_freq_index]), 1)
+```
+
+Now let's visualise the 150-250 Hz mean amplitude of the wavelet decomposition over time
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+fig = plt.figure(constrained_layout=True, figsize=(10, 5))
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 0.5])
+ax0 = plt.subplot(gs[0, 0])
+ax0.plot(eeg_example.restrict(zoom_ep), label="CA1")
+ax0.set_ylabel("LFP (a.u.)")
+ax0.set_title(f"EEG (1250 Hz)")
+ax1 = plt.subplot(gs[1, 0])
+ax1.legend()
+ax1.plot(ripple_power.restrict(zoom_ep), label="150-250 Hz")
+ax1.legend()
+ax1.set_ylabel("Mean Amplitude")
+ax1.set_xlabel("Time (s)")
+```
+
+It is then easy to isolate ripple times by using the pynapple functions `smooth` and `threshold`. In the following lines, `ripple_power` is smoothed with a gaussian kernel of size 0.005 second and thresholded with a value of 100.
+
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+smoothed_ripple_power = ripple_power.smooth(0.005)
+
+threshold_ripple_power = smoothed_ripple_power.threshold(100)
+```
+
+`threshold_ripple_power` contains all the time points above 100. The ripple epochs are contained in the `time_support` of the threshold time series. Here we call it `rip_ep`.
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+rip_ep = threshold_ripple_power.time_support
+```
+
+Now let's plot the ripples epoch as well as the smoothed ripple power.
+
+We can also plot `rip_ep` as vertical boxes to see if the detection is accurate
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+fig = plt.figure(constrained_layout=True, figsize=(10, 5))
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 0.5])
+ax0 = plt.subplot(gs[0, 0])
+ax0.plot(eeg_example.restrict(zoom_ep), label="CA1")
+for i, (s, e) in enumerate(rip_ep.intersect(zoom_ep).values):
+ ax0.axvspan(s, e, color=list(mcolors.TABLEAU_COLORS.keys())[i], alpha=0.2, ec=None)
+ax0.set_ylabel("LFP (a.u.)")
+ax0.set_title(f"EEG (1250 Hz)")
+ax1 = plt.subplot(gs[1, 0])
+ax1.legend()
+ax1.plot(ripple_power.restrict(zoom_ep), label="150-250 Hz")
+ax1.plot(smoothed_ripple_power.restrict(zoom_ep))
+for i, (s, e) in enumerate(rip_ep.intersect(zoom_ep).values):
+ ax1.axvspan(s, e, color=list(mcolors.TABLEAU_COLORS.keys())[i], alpha=0.2, ec=None)
+ax1.legend()
+ax1.set_ylabel("Mean Amplitude")
+ax1.set_xlabel("Time (s)")
+```
+
+Finally, let's zoom in on each of our isolated ripples
+
+
+```{code-cell} ipython3
+---
+jupyter:
+ outputs_hidden: false
+---
+fig = plt.figure(constrained_layout=True, figsize=(10, 5))
+gs = plt.GridSpec(2, 2, figure=fig, height_ratios=[1.0, 1.0])
+buffer = 0.075
+plt.suptitle("Isolated Sharp Wave Ripples")
+for i, (s, e) in enumerate(rip_ep.intersect(zoom_ep).values):
+ ax = plt.subplot(gs[int(i / 2), i % 2])
+ ax.plot(eeg_example.restrict(nap.IntervalSet(s - buffer, e + buffer)))
+ ax.axvspan(s, e, color=list(mcolors.TABLEAU_COLORS.keys())[i], alpha=0.2, ec=None)
+ ax.set_xlim(s - buffer, e + buffer)
+ ax.set_xlabel("Time (s)")
+ ax.set_ylabel("LFP (a.u.)")
+```
+
+***
+References
+-----------------------------------
+
+[1] Hasselmo, M. E., & Stern, C. E. (2014). Theta rhythm and the encoding and retrieval of space and time. Neuroimage, 85, 656-666.
+
+[2] Buzsáki, G. (2015). Hippocampal sharp wave‐ripple: A cognitive biomarker for episodic memory and planning. Hippocampus, 25(10), 1073-1188.
+
+
+:::{card}
+Authors
+^^^
+Kipp Freud (https://kippfreud.com/)
+
+Guillaume Viejo
+
+:::
\ No newline at end of file
diff --git a/doc/external.md b/doc/external.md
new file mode 100644
index 00000000..ffcb9620
--- /dev/null
+++ b/doc/external.md
@@ -0,0 +1,21 @@
+# External Projects
+
+Pynapple has been designed as a lightweight package for representing time series and epochs in system neuroscience.
+As such, it can function as a foundational element for other analysis packages handling time series data. Here we keep track of external projects that use pynapple.
+
+
+## NeMoS
+
+![image](https://raw.githubusercontent.com/flatironinstitute/nemos/main/docs/assets/glm_features_scheme.svg)
+
+[NeMoS](https://nemos.readthedocs.io/en/stable/) is a statistical modeling framework optimized for systems neuroscience and powered by JAX. It streamlines the process of defining and selecting models, through a collection of easy-to-use methods for feature design.
+
+The core of nemos includes GPU-accelerated, well-tested implementations of standard statistical models, currently focusing on the Generalized Linear Model (GLM).
+
+Check out this [page](https://nemos.readthedocs.io/en/stable/generated/neural_modeling/) for many examples of neural modelling using nemos and pynapple.
+
+```{eval-rst}
+.. Note::
+ Nemos is built on top of [jax](https://jax.readthedocs.io/en/latest/index.html), a library for high-performance numerical computing.
+ To ensure full compatibility with nemos, consider installing [pynajax](https://github.com/pynapple-org/pynajax), a pynapple backend for jax.
+```
\ No newline at end of file
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 00000000..5a1dffac
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,124 @@
+:html_theme.sidebar_secondary.remove:
+
+
+pynapple: python neural analysis package
+========================================
+
+.. toctree::
+ :maxdepth: 1
+ :hidden:
+
+ Installing
+ User guide
+ Examples
+ API
+ Releases
+ External projects
+ GPU acceleration
+ Citing
+
+
+|
+
+.. grid:: 1 1 2 2
+
+ .. grid-item::
+
+ Pynapple is a light-weight python library for
+ neurophysiological data analysis.
+
+ The goal is to offer a versatile set of tools to
+ study typical data in the field, i.e. time series
+ (spike times, behavioral events, etc.) and time intervals
+ (trials, brain states, etc.).
+
+ It also provides users with generic functions for
+ neuroscience such as tuning curves, cross-correlograms
+ and filtering.
+
+ .. grid:: auto
+
+ .. button-ref:: installing
+ :color: primary
+ :shadow:
+
+ Installing
+
+ .. button-ref:: user_guide/01_introduction_to_pynapple
+ :color: primary
+ :shadow:
+
+ Getting started
+
+ .. button-ref:: citing
+ :color: primary
+ :shadow:
+
+ Citing
+
+
+|
+
+.. grid:: 1 1 7 7
+ :gutter: 2
+
+ .. grid-item-card:: Time Series
+ :text-align: center
+ :link: ./user_guide/04_core_methods.html
+
+ .. image:: _static/example_thumbs/timeseries.svg
+ :class: dark-light
+
+ .. grid-item-card:: Decoding
+ :text-align: center
+ :link: ./user_guide/07_decoding.html
+
+ .. image:: _static/example_thumbs/decoding.svg
+ :class: dark-light
+
+ .. grid-item-card:: Perievent
+ :text-align: center
+ :link: ./user_guide/08_perievent.html
+
+ .. image:: _static/example_thumbs/perievent.svg
+ :class: dark-light
+
+ .. grid-item-card:: Correlation
+ :text-align: center
+ :link: ./user_guide/05_correlograms.html
+
+ .. image:: _static/example_thumbs/correlation.svg
+ :class: dark-light
+
+ .. grid-item-card:: Tuning curves
+ :text-align: center
+ :link: ./user_guide/06_tuning_curves.html
+
+ .. image:: _static/example_thumbs/tuningcurves.svg
+ :class: dark-light
+
+ .. grid-item-card:: Wavelets
+ :text-align: center
+ :link: ./user_guide/11_wavelets.html
+
+ .. image:: _static/example_thumbs/wavelets.svg
+ :class: dark-light
+
+ .. grid-item-card:: Filtering
+ :text-align: center
+ :link: ./user_guide/07_decoding.html
+
+ .. image:: _static/example_thumbs/filtering.svg
+ :class: dark-light
+
+
+
+Support
+~~~~~~~
+
+This package is supported by the Center for Computational Neuroscience, in the Flatiron Institute of the Simons Foundation
+
+.. image:: _static/CCN-logo-wText.png
+ :width: 200px
+ :target: https://www.simonsfoundation.org/flatiron/center-for-computational-neuroscience/
+
diff --git a/doc/installing.md b/doc/installing.md
new file mode 100644
index 00000000..14166ec1
--- /dev/null
+++ b/doc/installing.md
@@ -0,0 +1,56 @@
+# Installing and getting started
+
+The best way to install pynapple is with pip within a new [conda](https://docs.conda.io/en/latest/) environment :
+
+
+```
+conda create --name pynapple pip python
+conda activate pynapple
+pip install pynapple
+```
+
+or directly from the source code:
+
+```
+conda create --name pynapple pip python
+conda activate pynapple
+
+# clone the repository
+git clone https://github.com/pynapple-org/pynapple.git
+cd pynapple
+
+# Install in editable mode with `-e` or, equivalently, `--editable`
+pip install -e .
+```
+
+## Getting started
+
+
+Once installed, you can import pynapple with
+
+```python
+import pynapple as nap
+```
+
+To get started with pynapple, please read the [introduction](user_guide/01_introduction_to_pynapple) that introduces the minimal concepts.
+
+## Dependencies
+
+
+### Supported python versions
+
+
+ - Python 3.8+
+
+### Mandatory dependencies
+
+
+ - pandas
+ - numpy
+ - scipy
+ - numba
+ - pynwb 2.0
+ - tabulate
+ - h5py
+ - rich
+
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 00000000..32bb2452
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=.
+set BUILDDIR=_build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.https://www.sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/doc/pynajax.md b/doc/pynajax.md
new file mode 100644
index 00000000..b06b021f
--- /dev/null
+++ b/doc/pynajax.md
@@ -0,0 +1,73 @@
+# Accelerating pynapple with jax
+
+## Motivation
+
+```{eval-rst}
+.. Warning::
+
+ New in `0.6.6`
+```
+
+Multiple python packages exist for high-performance computing. Internally, pynapple makes extensive use of [numba](https://numba.pydata.org/) for accelerating some functions. Numba is a stable package that provides speed gains with minimal installation issues when running on CPUs.
+
+Another high-performance toolbox for numerical analysis is
+[jax](https://jax.readthedocs.io/en/latest/index.html). In addition to accelerating python code on CPUs, GPUs, and TPUs, it provides a special representation of arrays using the [jax Array object](https://jax.readthedocs.io/en/latest/jax-101/01-jax-basics.html). Unfortunately, jax Array is incompatible with Numba. To solve this issue, we developed [pynajax](https://github.com/pynapple-org/pynajax).
+
+Pynajax is an accelerated backend for pynapple built on top of jax. It offers a fast acceleration for some pynapple functions using CPU or GPU. Here is a minimal example on how to use pynajax:
+
+``` bash
+pip install pynajax
+```
+
+
+
+``` python
+import pynapple as nap
+import numpy as np
+
+# Changed the backend from 'numba' to 'jax'
+nap.nap_config.set_backend("jax")
+
+# This will convert the numpy array to a jax Array.
+tsd = nap.Tsd(t=np.arange(100), d=np.random.randn(100))
+
+# This will run on GPU or CPU depending on the jax installation
+tsd.convolve(np.ones(11))
+```
+
+This [documentation page](https://pynapple-org.github.io/pynajax/generated/gallery/) keeps track of the list of pynapple functions that can be jax-accelerated as well as their performances compared to pure numba.
+
+## Installation issues
+
+To get the best of the pynajax backend, jax needs to use the GPU.
+
+While installing pynajax will install all the dependencies necessary to use jax, it does not guarantee
+the use of the GPU.
+
+To check if jax is using the GPU, you can run the following python commands :
+
+- no GPU found :
+
+ ```python
+ import jax
+ print(jax.devices())
+ [CpuDevice(id=0)]
+ ```
+
+- GPU found :
+
+ ```python
+ import jax
+ print(jax.devices())
+ [cuda(id=0)]
+ ```
+
+Support for installing `JAX` for GPU users can be found in the [jax documentation](https://jax.readthedocs.io/en/latest/installation.html)
+
+
+## Typical use-case
+
+
+In addition to providing high performance numerical computing, jax can be used as the backbone for a large scale machine learning model. Thus, pynajax can offer full compatibility between pynapple's time series representation and computational neuroscience models constructed using jax.
+
+An example of a python package using both pynapple and jax is [NeMoS](https://nemos.readthedocs.io/en/stable/).
\ No newline at end of file
diff --git a/doc/releases.md b/doc/releases.md
new file mode 100644
index 00000000..ddf3601d
--- /dev/null
+++ b/doc/releases.md
@@ -0,0 +1,189 @@
+# Releases
+
+## History
+
+
+This package somehow started about 20 years ago in Bruce McNaughton's lab. Dave Redish started the *TSToolbox* package in Matlab.
+Another postdoc in the lab, Francesco Battaglia, then made major contributions to the package. Francesco passed it on to Adrien Peyrache and other trainees in Paris and The Netherlands.
+Around 2016-2017, Luke Sjulson started *TSToolbox2*, still in Matlab and which includes some important changes.
+
+In 2018, Francesco started neuroseries, a Python package built on Pandas. It was quickly adopted in Adrien's lab, especially by Guillaume Viejo, a postdoc in the lab. Gradually, the majority of the lab was using it and new functions were constantly added.
+In 2021, Guillaume and other trainees in Adrien's lab decided to fork from neuroseries and started *pynapple*.
+The core of pynapple is largely built upon neuroseries. Some of the original changes to TSToolbox made by Luke were included in this package, especially the *time_support* property of all ts/tsd objects.
+
+Since 2023, the development of pynapple is led by [Guillaume Viejo](https://www.simonsfoundation.org/people/guillaume-viejo/)
+and [Edoardo Balzani](https://www.simonsfoundation.org/people/edoardo-balzani/) at the Center for Computational Neuroscience
+of the Flatiron institute.
+
+
+## Releases
+
+### 0.7.1 (2024-09-24)
+
+- Fixing nan issue when computing 1d tuning curve (See issue #334).
+- Refactor tuning curves and correlogram tests.
+- Adding validators decorators for tuning curves and correlogram modules.
+
+### 0.7.0 (2024-09-16)
+
+- Morlet wavelets spectrogram with utility for plotting the wavelets.
+- (Mean) Power spectral density. Returns a Pandas DataFrame.
+- Convolve function works for any dimension of time series and any dimensions of kernel.
+- `dtype` in count function
+- `get_slice`: public method with a simplified API, argument start, end, time_units. returns a slice that matches behavior of Base.get.
+- `_get_slice`: private method, adds the argument "mode" this can be: "after_t", "before_t", "closest_t", "restrict".
+- `split` method for IntervalSet. Argument is `interval_size` in time unit.
+- Changed os import to pathlib.
+- Fixed pickling issue. TsGroup can now be saved as pickle.
+- TsGroup can be created from an iterable of Ts/Tsd objects.
+- IntervalSet can be created from (start, end) pairs
+
+
+### 0.6.6 (2024-05-28)
+
+- Full lazy-loading for NWB file.
+- Parameter `load_array` for time series can prevent loading zarr array
+- Function to merge a list of `TsGroup`
+
+
+### 0.6.5 (2024-05-14)
+
+- Full `pynajax` backend compatibility
+- Fixed `TsdFrame` column slicing
+
+
+### 0.6.4 (2024-04-18)
+
+- Fixing IntervalSet `__repr__`. Tabulate conflict with numpy 1.26.
+
+
+### 0.6.3 (2024-04-17)
+
+- Improving `__repr__` for all objects.
+- TsGroup `__getattr__` and `__setattr__` added to access metadata columns directly
+- TsGroup `__setitem__` now allows changes directly to metadata
+- TsGroup `__getitem__` returns column of metadata if passed as string
+
+
+### 0.6.2 (2024-04-04)
+
+- `smooth` now takes standard deviation in time units
+- Fixed `TsGroup` saving method.
+- `__getattr__` of `BaseTsd` allow numpy functions to be attached as attributes of Tsd objects
+- Added `get` method for `TsGroup`
+- Tsds can be concatenated vertically if time indexes match.
+
+
+### 0.6.1 (2024-03-03)
+
+- Fixed pynapple `loc` method for new `IntervalSet`
+
+
+### 0.6.0 (2024-03-02)
+
+- Refactoring `IntervalSet` to pure numpy ndarray.
+- Implementing new chain of inheritance for time series with abstract base class. `base_class.Base` holds the temporal methods for all time series and `Ts`. `time_series.BaseTsd` inherit `Base` and implements the common methods for `Tsd`, `TsdFrame` and `TsdTensor`.
+- Automatic conversion to numpy ndarray for all objects that are numpy-like (typically jax).
+
+
+### 0.5.1 (2024-01-29)
+
+- Implementing `event_trigger_average` for all dimensions.
+- Hiding jitted functions from users.
+
+
+### 0.5.0 (2023-12-12)
+
+- Removing GUI stack from pynapple. To create a NWB file, users need to install nwbmatic (https://github.com/pynapple-org/nwbmatic)
+- Implementing `compute_perievent_continuous`
+- Implementing `convolve` for Tsd, TsdFrame and TsdTensor
+- Implementing `smooth` for fast gaussian smoothing of time series
+
+
+### 0.4.1 (2023-10-30)
+
+- Implementing `get` method that returns either an interval or the closest timepoint
+
+
+### 0.4.0 (2023-10-11)
+
+- Implementing the numpy array container approach within pynapple
+- TsdTensor for objects larger than 2 dimensions is now available
+
+
+### 0.3.6 (2023-09-11)
+
+- Fix issue in NWB reader class with units
+- Implement a linear interpolation function.
+
+
+### 0.3.5 (2023-08-08)
+
+- NWB reader class
+- NPZ reader class
+- Folder class for navigating a dataset.
+- Cross-correlograms function can take tuple
+- New doc with mkdocs-gallery
+
+
+### 0.3.4 (2023-06-29)
+
+- `TsGroup.to_tsd` and `Tsd.to_tsgroup` transformations
+- `count` can take IntervalSet
+- Saving to npz functions for all objects.
+- `tsd.value_from` can take TsdFrame
+- Warning message for deprecating current IO.
+
+
+### 0.3.3 (2023-04-17)
+
+- Fixed minor bug with tkinter
+
+
+### 0.3.2 (2023-04-12)
+
+- PyQt removed from the list of dependencies
+
+
+### 0.3.1 (2022-12-08)
+
+- Core functions rewritten with Numba
+
+
+### 0.2.4 (2022-05-02)
+
+
+### 0.2.3 (2022-04-05)
+
+- Fixed minor bug when saving DLC in NWB.
+
+
+### 0.2.3 (2022-04-05)
+
+- Alpha release
+
+
+### 0.2.2 (2022-04-05)
+
+- Beta testing version for public
+
+
+### 0.2.1 (2022-02-07)
+
+- Beta testing version for Peyrache Lab.
+
+
+### 0.2.0 (2022-01-10)
+
+- First version for pynapple with main features in core, process and IO.
+
+
+### 0.2.0 Pre-release (2022-01-06)
+
+- Pre-release version for pynapple with main features in core and process.
+
+
+### 0.1.1 (2021-10-25)
+
+- First release on PyPI.
+- First minimal version
\ No newline at end of file
diff --git a/doc/user_guide.md b/doc/user_guide.md
new file mode 100644
index 00000000..6f67aba3
--- /dev/null
+++ b/doc/user_guide.md
@@ -0,0 +1,60 @@
+User guide
+==========
+
+:::{card} Getting started
+```{toctree}
+:maxdepth: 3
+Introduction to pynapple
+```
+:::
+
+:::{card} Input-output
+```{toctree}
+Input-output & lazy-loading
+```
+:::
+
+:::{card} Core methods
+```{toctree}
+Interaction with numpy
+```
+
+```{toctree}
+Core methods
+```
+:::
+
+:::{card} High-level analysis
+```{toctree}
+Correlograms of discrete events
+```
+
+```{toctree}
+Tuning curves
+```
+
+```{toctree}
+Decoding
+```
+
+```{toctree}
+Perievent
+```
+
+```{toctree}
+Randomization
+```
+
+```{toctree}
+Power spectral density
+```
+
+```{toctree}
+Wavelet decomposition <11_wavelets>
+```
+
+```{toctree}
+Filtering time series
+```
+:::
+
diff --git a/doc/user_guide/01_introduction_to_pynapple.md b/doc/user_guide/01_introduction_to_pynapple.md
new file mode 100644
index 00000000..ccc97661
--- /dev/null
+++ b/doc/user_guide/01_introduction_to_pynapple.md
@@ -0,0 +1,436 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+# Introduction to pynapple
+
+The goal of this tutorial is to quickly learn enough about pynapple to get started with data analysis. This tutorial assumes familiarity with the basic functionalities of numpy.
+
+You can check how to install pynapple [here](../installing.md).
+
+:::{important}
+By default, pynapple will assume time units are in seconds when passing timestamp arrays or time parameters such as bin size (unless specified with the `time_units` argument)
+:::
+
+***
+Importing pynapple
+------------------
+
+The convention is to import pynapple with a namespace:
+
+```{code-cell} ipython3
+import pynapple as nap
+```
+
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+
+import numpy as np
+import matplotlib.pyplot as plt
+import pandas as pd
+import seaborn as sns
+
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+***
+Instantiating pynapple objects
+------------------------------
+
+### `nap.Tsd`: 1-dimensional time series
+
+If you have a 1-dimensional time series, you use the `nap.Tsd` object. The arguments `t` and `d` are the arguments for timestamps and data.
+
+
+```{code-cell} ipython3
+tsd = nap.Tsd(t=np.arange(100), d=np.random.rand(100))
+
+print(tsd)
+```
+
+
+### `nap.TsdFrame`: 2-dimensional time series
+
+If you have a 2-dimensional time series, you use the `nap.TsdFrame` object. The arguments `t` and `d` are the arguments for timestamps and data. You can add the argument `columns` to label each column.
+
+
+```{code-cell} ipython3
+tsdframe = nap.TsdFrame(
+ t=np.arange(100), d=np.random.rand(100, 3), columns=["a", "b", "c"]
+)
+
+print(tsdframe)
+```
+
+### `nap.TsdTensor`: n-dimensional time series
+
+If you have larger than 2 dimensions time series (typically movies), you use the `nap.TsdTensor` object . The arguments `t` and `d` are the arguments for timestamps and data.
+
+
+```{code-cell} ipython3
+tsdtensor = nap.TsdTensor(
+ t=np.arange(100), d=np.random.rand(100, 3, 4)
+)
+
+print(tsdtensor)
+```
+
+### `nap.IntervalSet`: intervals
+
+The `IntervalSet` object stores multiple epochs with a common time unit in a table format. The epochs are strictly _non-overlapping_. Both `start` and `end` arguments are necessary.
+
+
+```{code-cell} ipython3
+epochs = nap.IntervalSet(start=[0, 10], end=[5, 15])
+
+print(epochs)
+
+```
+
+### `nap.Ts`: timestamps
+
+`Ts` object stores timestamps data (typically spike times or reward times). The argument `t` for timestamps is necessary.
+
+
+```{code-cell} ipython3
+ts = nap.Ts(t=np.sort(np.random.uniform(0, 100, 10)))
+
+print(ts)
+```
+
+### `nap.TsGroup`: group of timestamps
+
+
+`TsGroup` is a dictionary that stores multiple time series with different time stamps (i.e. a group of neurons with different spike times from one session). The first argument `data` can be a dictionary of `Ts`, `Tsd` or numpy 1d array.
+
+
+```{code-cell} ipython3
+data = {
+ 0: nap.Ts(t=np.sort(np.random.uniform(0, 100, 1000))),
+ 1: nap.Ts(t=np.sort(np.random.uniform(0, 100, 2000))),
+ 2: nap.Ts(t=np.sort(np.random.uniform(0, 100, 3000))),
+}
+
+tsgroup = nap.TsGroup(data)
+
+print(tsgroup, "\n")
+```
+
+
+***
+Interaction between pynapple objects
+------------------------------------
+
+### Time support : attribute of time series
+
+
+A key feature of how pynapple manipulates time series is an inherent time support object defined for `Ts`, `Tsd`, `TsdFrame` and `TsGroup` objects. The time support object is defined as an `IntervalSet` that provides the time series with a context. For example, the restrict operation will automatically update the time support object for the new time series. Ideally, the time support object should be defined for all time series when instantiating them. If no time support is given, the time support is inferred from the start and end of the time series.
+
+In this example, a `Tsd` is instantiated with and without a time support of intervals 0 to 2 seconds. Notice how the shape of the `Tsd` varies.
+
+
+```{code-cell} ipython3
+time_support = nap.IntervalSet(start=0, end=2)
+
+print(time_support)
+```
+
+Without time support :
+
+```{code-cell} ipython3
+
+print(nap.Tsd(t=[0, 1, 2, 3, 4], d=[0, 1, 2, 3, 4]))
+
+```
+With time support :
+
+```{code-cell} ipython3
+
+print(
+ nap.Tsd(
+ t=[0, 1, 2, 3, 4], d=[0, 1, 2, 3, 4],
+ time_support = time_support
+ )
+ )
+
+```
+
+The time support object has become an attribute of the time series. Depending on the operation applied to the time series, it will be updated.
+
+```{code-cell} ipython3
+
+tsd = nap.Tsd(
+ t=np.arange(10), d=np.random.randn(10),
+ time_support = time_support
+ )
+
+print(tsd.time_support)
+```
+
+
+
+### Restricting time series to epochs
+
+The central function of pynapple is the `restrict` method of `Ts`, `Tsd`, `TsdFrame` and `TsGroup`. The argument is an `IntervalSet` object. Only time points within the intervals of the `IntervalSet` object are returned. The time support of the time series is updated accordingly.
+
+
+```{code-cell} ipython3
+tsd = nap.Tsd(t=np.arange(10), d=np.random.randn(10))
+
+ep = nap.IntervalSet(start=[0, 7], end=[3.5, 12.4])
+
+print(ep)
+```
+
+From :
+
+```{code-cell} ipython3
+print(tsd)
+```
+
+to :
+
+```{code-cell} ipython3
+new_tsd = tsd.restrict(ep)
+
+print(new_tsd)
+
+```
+
+
+***
+Numpy & pynapple
+----------------
+
+Pynapple relies on numpy to store the data. Pynapple objects behave very similarly to numpy and numpy functions can be applied directly
+
+```{code-cell} ipython3
+tsdtensor = nap.TsdTensor(t=np.arange(100), d=np.random.rand(100, 3, 4))
+```
+
+If a numpy function preserves the time axis, a pynapple object is returned.
+
+In this example, averaging a `TsdTensor` along the second dimension returns a `TsdFrame`:
+
+```{code-cell} ipython3
+print(
+ np.mean(tsdtensor, 1)
+ )
+```
+
+Averaging along the time axis will return a numpy array object:
+
+```{code-cell} ipython3
+print(
+ np.mean(tsdtensor, 0)
+ )
+```
+
+***
+Slicing objects
+---------------
+
+### Slicing time series and intervals
+
+`Ts`, `Tsd`, `TsdFrame`, `TsdTensor` and `IntervalSet` can be sliced similar to numpy array:
+
+```{code-cell} ipython3
+tsdframe = nap.TsdFrame(t=np.arange(10)/10, d=np.random.randn(10,4))
+print(tsdframe)
+```
+
+```{code-cell} ipython3
+print(tsdframe[4:7])
+```
+
+```{code-cell} ipython3
+print(tsdframe[:,0])
+```
+
+```{code-cell} ipython3
+ep = nap.IntervalSet(start=[0, 10, 20], end=[4, 15, 25])
+print(ep)
+```
+
+```{code-cell} ipython3
+print(ep[0:2])
+```
+
+```{code-cell} ipython3
+print(ep[1])
+```
+
+
+### Slicing TsGroup
+
+`TsGroup` object can be indexed to return directly the timestamp object or sliced to return a new `TsGroup`.
+
+Indexing :
+
+```{code-cell} ipython3
+print(tsgroup[0], "\n")
+```
+
+Slicing :
+
+
+```{code-cell} ipython3
+print(tsgroup[[0, 2]])
+```
+
+***
+Core functions
+--------------
+
+Objects have methods that can help transform and refine time series. This is a non exhaustive list.
+
+### Binning : counting events
+
+Time series objects have the `count` method that counts the number of timestamps. This is typically used when counting the number of spikes within a particular bin over multiple intervals. The returned object is a `Tsd` or `TsdFrame` with the timestamps being the center of the bins.
+
+
+```{code-cell} ipython3
+count = tsgroup.count(1)
+
+print(count)
+```
+
+### Thresholding
+
+Some time series have specific methods. The `threshold` method of `Tsd` returns a new `Tsd` with all the data above or below a given value.
+
+```{code-cell} ipython3
+tsd = nap.Tsd(t=np.arange(10), d=np.random.rand(10))
+
+print(tsd)
+
+print(tsd.threshold(0.5))
+```
+
+An important aspect of the thresholding is that the time support gets updated based on the time points remaining. To get the epochs above/below a certain threshold, you can access the time support of the returned object.
+
+```{code-cell} ipython3
+print(tsd.time_support)
+
+print(tsd.threshold(0.5, "below").time_support)
+
+```
+
+
+### Time-bin averaging of data
+
+Many analyses require bringing time series to the same rate and dimensions. A quick way to downsample a time series to match the size of, for example, a count array is to bin average. The `bin_average` method takes a bin size in units of time.
+
+```{code-cell} ipython3
+tsdframe = nap.TsdFrame(t=np.arange(0, 100)/10, d=np.random.randn(100,3))
+
+print(tsdframe)
+
+```
+
+Here we go from a timepoint every 100ms to a timepoint every second.
+
+
+```{code-cell} ipython3
+print(tsdframe.bin_average(1))
+
+```
+
+***
+Loading data
+------------
+
+See [here](02_input_output.md) for more details about loading data.
+
+### Loading NWB
+
+Pynapple supports by default the [NWB standard](https://pynwb.readthedocs.io/en/latest/).
+
+NWB files can be loaded with :
+
+```
+nwb = nap.load_file("path/to/my.nwb")
+```
+
+or directly with the `NWBFile` class:
+
+```
+nwb = nap.NWBFile("path/to/my.nwb")
+
+print(nwb)
+```
+```
+my.nwb
+┍━━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━━┑
+│ Keys │ Type │
+┝━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━━┥
+│ units │ TsGroup │
+┕━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━┙
+```
+
+The returned object behaves like a dictionary. The first column indicates the keys. The second column indicates the object type.
+
+```
+print(nwb['units'])
+```
+```
+ Index rate location group
+------- ------ ---------- -------
+ 0 1.0 brain 0
+ 1 1.0 brain 0
+ 2 1.0 brain 0
+```
+
+
+***
+Overview of advanced analysis
+-----------------------------
+
+The `process` module of pynapple contains submodules that group methods that can be applied for high level analysis. All of the methods are directly available from the `nap` namespace.
+
+:::{important}
+Some functions have been doubled given the nature of the data. For instance, computing a 1d tuning curves from spiking activity requires the `nap.compute_1d_tuning_curves`. The same function for calcium imaging data which is a continuous time series is available with `nap.compute_1d_tuning_curves_continuous`.
+:::
+
+**[Discrete correlograms](05_correlograms)**
+
+This module computes correlograms of discrete events, for example the cross-correlograms of a population of neurons.
+
+**[Bayesian decoding](07_decoding)**
+
+The decoding module performs Bayesian decoding given a set of tuning curves and a `TsGroup`.
+
+**[Filtering](12_filtering)**
+
+Bandpass, lowpass, highpass or bandstop filtering can be done to any time series using either Butterworth filter or windowed-sinc convolution.
+
+**[Perievent time histogram](08_perievent)**
+
+The perievent module has a set of functions to center time series and timestamps data around particular events.
+
+**[Randomizing](09_randomization)**
+
+The randomize module holds multiple techniques to shuffle timestamps in order to create surrogate datasets.
+
+**[Spectrum](10_power_spectral_density)**
+
+The spectrum module contains the methods to return the (mean) power spectral density of a time series.
+
+**[Tuning curves](06_tuning_curves)**
+
+Tuning curves of neurons based on spiking or calcium activity can be computed.
+
+**[Wavelets](11_wavelets)**
+
+The wavelets module performs Morlet wavelets decomposition of a time series.
\ No newline at end of file
diff --git a/doc/user_guide/02_input_output.md b/doc/user_guide/02_input_output.md
new file mode 100644
index 00000000..cbf363e5
--- /dev/null
+++ b/doc/user_guide/02_input_output.md
@@ -0,0 +1,338 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+# Input-output & lazy-loading
+
+Pynapple provides loaders for [NWB format](https://pynwb.readthedocs.io/en/stable/index.html#).
+
+Each pynapple object can be saved as an [`npz`](https://numpy.org/devdocs/reference/generated/numpy.savez.html) file with a special structure and loaded as an `npz`.
+
+In addition, the `Folder` class helps you walk through a set of nested folders to load/save `npz`/`nwb` files.
+
+
+
+## NWB
+
+When loading a NWB file, pynapple will walk through it and test the compatibility of each data structure with a
+pynapple object. If the data structure is incompatible, pynapple will ignore it. The class that deals with reading
+NWB file is [`nap.NWBFile`](pynapple.io.interface_nwb.NWBFile). You can pass the path to a NWB file or directly an opened NWB file. Alternatively
+you can use the function [`nap.load_file`](pynapple.io.misc.load_file).
+
+:::{note}
+Creating the NWB file is outside the scope of pynapple. The NWB files used here have already been created before.
+Multiple tools exist to create NWB files automatically. You can check [neuroconv](https://neuroconv.readthedocs.io/en/main/), [NWBGuide](https://nwb-guide.readthedocs.io/en/latest/) or even [NWBmatic](https://github.com/pynapple-org/nwbmatic).
+:::
+
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+
+import numpy as np
+import pynapple as nap
+import os
+import requests, math
+import tqdm
+
+nwb_path = 'A2929-200711.nwb'
+
+if nwb_path not in os.listdir("."):
+ r = requests.get(f"https://osf.io/fqht6/download", stream=True)
+ block_size = 1024*1024
+ with open(nwb_path, 'wb') as f:
+ for data in tqdm.tqdm(r.iter_content(block_size), unit='MB', unit_scale=True,
+ total=math.ceil(int(r.headers.get('content-length', 0))//block_size)):
+ f.write(data)
+```
+
+```{code-cell} ipython3
+data = nap.load_file(nwb_path)
+
+print(data)
+```
+
+Pynapple will give you a table with all the entries of the NWB file that are compatible with a pynapple object.
+When parsing the NWB file, nothing is loaded. The `NWBFile` class keeps track of the position of the data within the NWB file with a key. You can see it with the attribute `key_to_id`.
+
+
+```{code-cell} ipython3
+data.key_to_id
+```
+
+Loading an entry will get pynapple to read the data.
+
+
+```{code-cell} ipython3
+z = data['z']
+
+print(data['z'])
+```
+
+Internally, the `NWBFile` class has replaced the pointer to the data with the actual data.
+
+While it looks like pynapple has loaded the data, in fact it did not. By default, calling the NWB object will return an HDF5 dataset.
+
+
+```{code-cell} ipython3
+print(type(z.values))
+```
+
+Notice that the time array is always loaded.
+
+
+```{code-cell} ipython3
+print(type(z.index.values))
+```
+
+This is very useful in the case of large datasets that do not fit in memory. You can then get a chunk of the data that will actually be loaded.
+
+
+```{code-cell} ipython3
+z_chunk = z.get(670, 680) # getting 10s of data.
+
+print(z_chunk)
+```
+
+Data are now loaded.
+
+
+```{code-cell} ipython3
+print(type(z_chunk.values))
+```
+
+You can still apply any high level function of pynapple. For example here, we compute some tuning curves without preloading the dataset.
+
+
+```{code-cell} ipython3
+tc = nap.compute_1d_tuning_curves(data['units'], data['y'], 10)
+
+```
+
+:::{warning}
+Care should still be taken when calling any pynapple function on a memory map. Pynapple does not implement any batching function internally. Calling a high level function of pynapple on a dataset that does not fit in memory will likely cause a memory error.
+:::
+
+
+To change this behavior, you can pass `lazy_loading=False` when instantiating the `NWBFile` class.
+
+
+```{code-cell} ipython3
+data = nap.NWBFile(nwb_path, lazy_loading=False)
+
+z = data['z']
+
+print(type(z.d))
+```
+
+## Saving as NPZ
+
+Pynapple objects have `save` methods to save them as npz files.
+
+```{code-cell} ipython3
+tsd = nap.Tsd(t=np.arange(10), d=np.arange(10))
+tsd.save("my_tsd.npz")
+
+print(nap.load_file("my_tsd.npz"))
+```
+
+To load a NPZ file with pynapple, it must contain a particular set of keys.
+
+```{code-cell} ipython3
+print(np.load("my_tsd.npz"))
+```
+
+When a pynapple object has metadata, it is added to the NPZ file.
+
+```{code-cell} ipython3
+tsgroup = nap.TsGroup({
+ 0:nap.Ts(t=[0,1,2]),
+ 1:nap.Ts(t=[0,1,2])
+ }, my_label = ["a", "b"])
+tsgroup.save("group")
+
+print(np.load("group.npz"))
+print(np.load("group.npz")["my_label"])
+```
+
+## Memory map
+
+### Numpy memory map
+
+Pynapple can work with [`numpy.memmap`](https://numpy.org/doc/stable/reference/generated/numpy.memmap.html).
+
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+
+data = np.memmap("memmap.dat", dtype='float32', mode='w+', shape = (10, 3))
+
+data[:] = np.random.randn(10, 3).astype('float32')
+
+timestep = np.arange(10)
+```
+
+```{code-cell}
+print(type(data))
+```
+
+Instantiating a pynapple `TsdFrame` will keep the `data` as a memory map.
+
+
+```{code-cell} ipython3
+eeg = nap.TsdFrame(t=timestep, d=data)
+
+print(eeg)
+```
+
+We can check the type of `eeg.values`.
+
+
+```{code-cell} ipython3
+print(type(eeg.values))
+```
+
+### Zarr
+
+It is possible to use higher-level libraries like [zarr](https://zarr.readthedocs.io/en/stable/index.html) as well, though not directly.
+
+
+```{code-cell} ipython3
+import zarr
+zarr_array = zarr.zeros((10000, 5), chunks=(1000, 5), dtype='i4')
+timestep = np.arange(len(zarr_array))
+
+tsdframe = nap.TsdFrame(t=timestep, d=zarr_array)
+```
+
+As the warning suggests, `zarr_array` is converted to a numpy array.
+
+
+```{code-cell} ipython3
+print(type(tsdframe.d))
+```
+
+To maintain a zarr array, you can change the argument `load_array` to False.
+
+
+```{code-cell} ipython3
+tsdframe = nap.TsdFrame(t=timestep, d=zarr_array, load_array=False)
+
+print(type(tsdframe.d))
+```
+
+Within pynapple, numpy memory maps are recognized as numpy arrays while zarr arrays are not.
+
+
+```{code-cell} ipython3
+print(type(data), "Is np.ndarray? ", isinstance(data, np.ndarray))
+print(type(zarr_array), "Is np.ndarray? ", isinstance(zarr_array, np.ndarray))
+```
+
+Similar to numpy memory map, you can use pynapple functions directly.
+
+
+```{code-cell} ipython3
+ep = nap.IntervalSet(0, 10)
+tsdframe.restrict(ep)
+```
+
+```{code-cell} ipython3
+group = nap.TsGroup({0:nap.Ts(t=[10, 20, 30])})
+
+sta = nap.compute_event_trigger_average(group, tsdframe, 1, (-2, 3))
+
+print(type(tsdframe.values))
+print("\n")
+print(sta)
+```
+
+## Navigating a dataset
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+
+import zipfile
+
+project_path = "MyProject"
+
+
+if project_path not in os.listdir("."):
+ r = requests.get(f"https://osf.io/a9n6r/download", stream=True)
+ block_size = 1024*1024
+ with open(project_path+".zip", 'wb') as f:
+ for data in tqdm.tqdm(r.iter_content(block_size), unit='MB', unit_scale=True,
+ total=math.ceil(int(r.headers.get('content-length', 0))//block_size)):
+ f.write(data)
+
+ with zipfile.ZipFile(project_path+".zip", 'r') as zip_ref:
+ zip_ref.extractall(".")
+
+```
+
+We can load a folder containing multiple animals and sessions with the `Folder` class. The method `nap.load_folder` provides a shortcut.
+
+
+```{code-cell} ipython3
+project = nap.load_folder(project_path)
+
+print(project)
+```
+
+The pynapple IO offers a convenient way of visualizing and navigating a folder
+based dataset. To visualize the whole hierarchy of Folders, you can call the
+view property or the expand function.
+
+```{code-cell} ipython3
+
+project.view
+
+```
+
+Here it shows all the subjects (in this case only A2929),
+all the sessions and all of the derivatives folders.
+It shows as well all the NPZ files that contains a pynapple object
+and the NWB files.
+
+The object `project` behaves like a nested dictionary.
+It is then easy to loop and navigate through a hierarchy of folders
+when doing analyses. In this case, we are going to take only the
+session A2929-200711.
+
+```{code-cell} ipython3
+session = project["sub-A2929"]["A2929-200711"]
+print(session)
+```
+
+The Folder view gives the path to any object. It can then be easily loaded.
+
+```{code-cell} ipython3
+print(project["sub-A2929"]["A2929-200711"]["pynapplenwb"]["A2929-200711"])
+```
+### JSON sidecar file
+
+A good practice for sharing datasets is to include as much
+meta-information as possible. Following
+[BIDS](https://bids-standard.github.io/bids-starter-kit/index.html)
+specifications, any data file should be accompanied by a JSON sidecar file.
+
+This is possible using the `Folder` class of pynapple with the argument `description`.
+
+```{code-cell} ipython3
+epoch = nap.IntervalSet(start=np.array([0, 3]), end=np.array([1, 6]))
+session.save("stimulus-fish", epoch, description="Fish pictures to V1")
+```
+
+It is then possible to read the description with the `doc` method of the `Folder` object.
+
+```{code-cell} ipython3
+session.doc("stimulus-fish")
+```
diff --git a/doc/user_guide/03_interacting_with_numpy.md b/doc/user_guide/03_interacting_with_numpy.md
new file mode 100644
index 00000000..ba973bd9
--- /dev/null
+++ b/doc/user_guide/03_interacting_with_numpy.md
@@ -0,0 +1,240 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+Numpy tutorial
+=======================
+
+This tutorial shows how pynapple interacts with numpy.
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+import numpy as np
+import pynapple as nap
+import pandas as pd
+```
+
+Multiple time series objects are available depending on the shape of the data.
+
+- `TsdTensor` : for data with more than 2 dimensions, typically movies.
+- `TsdFrame` : for column-based data. It can be easily converted to a pandas.DataFrame. Columns can be labelled and selected similar to pandas.
+- `Tsd` : one-dimensional time series. It can be converted to a pandas.Series.
+- `Ts` : For timestamps data only.
+
+
++++
+
+Initialization
+--------------
+
+
+```{code-cell} ipython3
+tsdtensor = nap.TsdTensor(t=np.arange(100), d=np.random.rand(100, 5, 5), time_units="s")
+tsdframe = nap.TsdFrame(t=np.arange(100), d=np.random.rand(100, 3), columns = ['a', 'b', 'c'])
+tsd = nap.Tsd(t=np.arange(100), d=np.random.rand(100))
+ts = nap.Ts(t=np.arange(100))
+
+print(tsdtensor)
+```
+
+`Tsd` and `Ts` can be converted to a `pandas.Series`.
+
+
+```{code-cell} ipython3
+print(tsd.as_series())
+```
+
+`TsdFrame` to a `pandas.DataFrame`.
+
+
+```{code-cell} ipython3
+print(tsdframe.as_dataframe())
+```
+
+Attributes
+----------
+The numpy array is accessible with the attributes `.values`, `.d` and the functions `.as_array()`, `.to_numpy()`.
+The time index array is a `TsIndex` object accessible with `.index` or `.t`.
+`.shape` and `.ndim` are also accessible.
+
+
+```{code-cell} ipython3
+print(tsdtensor.ndim)
+print(tsdframe.shape)
+print(len(tsd))
+```
+
+Slicing
+-------
+Slicing is very similar to numpy array. The first dimension is always time and time support is always passed on if a pynapple object is returned.
+
+First 10 elements. Return a `TsdTensor`
+
+
+```{code-cell} ipython3
+print(tsdtensor[0:10])
+```
+
+First column. Return a `Tsd`
+
+
+```{code-cell} ipython3
+print(tsdframe[:,0])
+```
+
+First element. Return a numpy ndarray
+
+
+```{code-cell} ipython3
+print(tsdtensor[0])
+```
+
+The time support never changes when slicing along the time axis.
+
+
+```{code-cell} ipython3
+print(tsd.time_support)
+print(tsd[0:20].time_support)
+```
+
+`TsdFrame` offers special slicing similar to `pandas.DataFrame`.
+
+Only `TsdFrame` can have columns labelling and indexing.
+
+
+```{code-cell} ipython3
+print(tsdframe.loc['a'])
+print(tsdframe.loc[['a', 'c']])
+```
+
+Arithmetic
+----------
+Arithmetical operations works similar to numpy
+
+
+```{code-cell} ipython3
+tsd = nap.Tsd(t=np.arange(5), d=np.ones(5))
+print(tsd + 1)
+```
+
+It is possible to do array operations on the time series provided that the dimensions match.
+The output will still be a time series object.
+
+
+```{code-cell} ipython3
+print(tsd - np.ones(5))
+```
+
+Nevertheless, operations like this are not permitted:
+
+
+```{code-cell} ipython3
+try:
+ tsd + tsd
+except Exception as error:
+ print(error)
+```
+
+Array operations
+----------------
+The most common numpy functions will return a time series if the output first dimension matches the shape of the time index.
+
+Here the `TsdTensor` is averaged along the time axis. The output is a numpy array.
+
+
+```{code-cell} ipython3
+print(np.mean(tsdtensor, 0))
+```
+
+Here averaging across the second dimension returns a `TsdFrame`.
+
+
+```{code-cell} ipython3
+print(np.mean(tsdtensor, 1))
+```
+
+This is not true for FFT functions though.
+
+
+```{code-cell} ipython3
+try:
+ np.fft.fft(tsd)
+except Exception as error:
+ print(error)
+```
+
+Concatenating
+-------------
+It is possible to concatenate time series provided that they don't overlap, meaning time indexes should already be sorted across all the time series to concatenate.
+
+
+```{code-cell} ipython3
+tsd1 = nap.Tsd(t=np.arange(5), d=np.ones(5))
+tsd2 = nap.Tsd(t=np.arange(5)+10, d=np.ones(5)*2)
+tsd3 = nap.Tsd(t=np.arange(5)+20, d=np.ones(5)*3)
+
+print(np.concatenate((tsd1, tsd2, tsd3)))
+```
+
+It's also possible to concatenate vertically if time indexes match up to pynapple float precision.
+
+
+```{code-cell} ipython3
+tsdframe = nap.TsdFrame(t=np.arange(5), d=np.random.randn(5, 3))
+
+print(np.concatenate((tsdframe, tsdframe), 1))
+```
+
+Splitting
+--------
+Array split functions are also implemented
+
+
+```{code-cell} ipython3
+print(np.array_split(tsdtensor[0:10], 2))
+```
+
+Modifying
+---------
+It is possible to modify a time series element wise
+
+
+```{code-cell} ipython3
+print(tsd1)
+
+tsd1[0] = np.pi
+
+print(tsd1)
+```
+
+It is also possible to modify a time series with logical operations
+
+
+```{code-cell} ipython3
+tsd[tsd.values>0.5] = 0.0
+
+print(tsd)
+```
+
+Sorting
+---------
+It is not possible to sort along the first dimension as it would break the sorting of the time index
+
+
+```{code-cell} ipython3
+tsd = nap.Tsd(t=np.arange(100), d=np.random.rand(100))
+
+try:
+ np.sort(tsd)
+except Exception as error:
+ print(error)
+```
diff --git a/doc/user_guide/04_core_methods.md b/doc/user_guide/04_core_methods.md
new file mode 100644
index 00000000..ba269098
--- /dev/null
+++ b/doc/user_guide/04_core_methods.md
@@ -0,0 +1,375 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+# Core methods
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+import pynapple as nap
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+## Interval sets methods
+
+### Interaction between epochs
+
+```{code-cell} ipython3
+epoch1 = nap.IntervalSet(start=0, end=10) # no time units passed. Default is seconds.
+epoch2 = nap.IntervalSet(start=[5, 30], end=[20, 45])
+print(epoch1, "\n")
+print(epoch2, "\n")
+```
+
+#### `union`
+
+```{code-cell} ipython3
+epoch = epoch1.union(epoch2)
+print(epoch)
+```
+
+#### `intersect`
+
+```{code-cell} ipython3
+epoch = epoch1.intersect(epoch2)
+print(epoch)
+```
+
+#### `set_diff`
+
+```{code-cell} ipython3
+epoch = epoch1.set_diff(epoch2)
+print(epoch)
+```
+
+### `split`
+
+Useful for chunking time series, the `split` method splits an `IntervalSet` in a new
+`IntervalSet` based on the `interval_size` argument.
+
+```{code-cell} ipython3
+epoch = nap.IntervalSet(start=0, end=100)
+
+print(epoch.split(10, time_units="s"))
+```
+
+### Drop intervals
+
+```{code-cell} ipython3
+epoch = nap.IntervalSet(start=[5, 30], end=[6, 45])
+print(epoch)
+```
+
+#### `drop_short_intervals`
+
+```{code-cell} ipython3
+print(
+ epoch.drop_short_intervals(threshold=5)
+ )
+```
+
+#### `drop_long_intervals`
+
+```{code-cell} ipython3
+print(
+ epoch.drop_long_intervals(threshold=5)
+ )
+```
+
+### `merge_close_intervals`
+
+```{code-cell} ipython3
+:tags: [hide-input]
+epoch = nap.IntervalSet(start=[1, 7], end=[6, 45])
+print(epoch)
+```
+If two intervals are closer than the `threshold` argument, they are merged.
+
+```{code-cell} ipython3
+print(
+ epoch.merge_close_intervals(threshold=2.0)
+ )
+```
+
+
+## Metadata
+
+One advantage of grouping time series is that metainformation
+can be added directly on an element-wise basis.
+In this case, we add labels to each Ts object when instantiating the group and after.
+We can then use this label to split the group.
+See the TsGroup documentation for a complete methodology for splitting TsGroup objects.
+
+```{code-cell} ipython3
+group = {
+ 0: nap.Ts(t=np.sort(np.random.uniform(0, 100, 10))),
+ 1: nap.Ts(t=np.sort(np.random.uniform(0, 100, 20))),
+ 2: nap.Ts(t=np.sort(np.random.uniform(0, 100, 30))),
+}
+time_support = nap.IntervalSet(0, 100)
+
+tsgroup = nap.TsGroup(group, time_support=time_support)
+
+tsgroup['label1'] = np.random.randn(3)
+
+tsgroup.set_info(label2 = ['a', 'b', 'c'])
+
+print(tsgroup, "\n")
+
+```
+
+## Time series method
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+tsdframe = nap.TsdFrame(t=np.arange(100), d=np.random.randn(100, 3), columns=['a', 'b', 'c'])
+epochs = nap.IntervalSet([10, 65], [25, 80])
+tsd = nap.Tsd(t=np.arange(0, 100, 1), d=np.sin(np.arange(0, 10, 0.1)))
+```
+
+
+
+
+### `restrict`
+
+`restrict` is used to get time points within an `IntervalSet`. This method is available
+for `TsGroup`, `Tsd`, `TsdFrame`, `TsdTensor` and `Ts` objects.
+
+```{code-cell} ipython3
+tsdframe.restrict(epochs)
+```
+```{code-cell} ipython3
+:tags: [hide-input]
+plt.figure()
+plt.plot(tsdframe.restrict(epochs))
+[plt.axvspan(s, e, alpha=0.2) for s, e in epochs.values]
+plt.xlabel("Time (s)")
+plt.title("tsdframe.restrict(epochs)")
+plt.xlim(0, 100)
+plt.show()
+```
+
+This operation updates the time support attribute accordingly.
+
+```{code-cell} ipython3
+print(epochs)
+print(tsdframe.restrict(epochs).time_support)
+```
+
+### `count`
+
+`count` the number of timestamps within bins or epochs of an `IntervalSet` object.
+This method is available for `TsGroup`, `Tsd`, `TsdFrame`, `TsdTensor` and `Ts` objects.
+
+With a defined bin size:
+```{code-cell} ipython3
+count1 = tsgroup.count(bin_size=1.0, time_units='s')
+print(count1)
+```
+```{code-cell} ipython3
+:tags: [hide-input]
+plt.figure()
+plt.plot(count1[:,2], 'o-')
+plt.title("tsgroup.count(bin_size=1.0)")
+plt.plot(tsgroup[2].fillna(-1), '|', markeredgewidth=2)
+[plt.axvline(t, linewidth=0.5, alpha=0.5) for t in np.arange(0, 21)]
+plt.xlabel("Time (s)")
+plt.xlim(0, 20)
+plt.show()
+```
+
+With an `IntervalSet`:
+```{code-cell} ipython3
+count_ep = tsgroup.count(ep=epochs)
+
+print(count_ep)
+```
+
+### `bin_average`
+
+`bin_average` downsamples time series by averaging data points falling within a bin.
+This method is available for `Tsd`, `TsdFrame` and `TsdTensor`.
+
+```{code-cell} ipython3
+tsdframe.bin_average(3.5)
+```
+```{code-cell} ipython3
+:tags: [hide-input]
+bin_size = 3.5
+plt.figure()
+plt.plot(tsdframe[:,0], '.--', label="tsdframe[:,0]")
+plt.plot(tsdframe[:,0].bin_average(bin_size), 'o-', label="new_tsdframe[:,0]")
+plt.title(f"tsdframe.bin_average(bin_size={bin_size})")
+[plt.axvline(t, linewidth=0.5, alpha=0.5) for t in np.arange(0, 21,bin_size)]
+plt.xlabel("Time (s)")
+plt.xlim(0, 20)
+plt.legend(bbox_to_anchor=(1.0, 0.5, 0.5, 0.5))
+plt.show()
+```
+
+
+
+### `interpolate`
+
+The `interpolate` method of `Tsd`, `TsdFrame` and `TsdTensor` can be used to fill gaps in a time series. It is a wrapper of [`numpy.interp`](https://numpy.org/devdocs/reference/generated/numpy.interp.html).
+
+
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+tsd = nap.Tsd(t=np.arange(0, 25, 5), d=np.random.randn(5))
+ts = nap.Ts(t=np.arange(0, 21, 1))
+```
+
+```{code-cell} ipython3
+new_tsd = tsd.interpolate(ts)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+plt.figure()
+plt.plot(new_tsd, '.-', label="new_tsd")
+plt.plot(tsd, 'o', label="tsd")
+plt.plot(ts.fillna(0), '+', label="ts")
+plt.title("tsd.interpolate(ts)")
+plt.xlabel("Time (s)")
+plt.legend(bbox_to_anchor=(1.0, 0.5, 0.5, 0.5))
+plt.show()
+```
+
+
+
+### `value_from`
+
+`value_from` assigns to every timestamp the closest value in time from another time series. Let's define the time series we want to assign values from.
+
+
+For every timestamps in `tsgroup`, we want to assign the closest value in time from `tsd`.
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+tsd = nap.Tsd(t=np.arange(0, 100, 1), d=np.sin(np.arange(0, 10, 0.1)))
+```
+
+```{code-cell} ipython3
+tsgroup_from_tsd = tsgroup.value_from(tsd)
+```
+
+We can display the first element of `tsgroup` and `tsgroup_sin`.
+
+```{code-cell} ipython3
+:tags: [hide-input]
+plt.figure()
+plt.plot(tsgroup[0].fillna(0), "|", label="tsgroup[0]", markersize=20, mew=3)
+plt.plot(tsd, linewidth=2, label="tsd")
+plt.plot(tsgroup_from_tsd[0], "o", label = "tsgroup_from_tsd[0]", markersize=20)
+plt.title("tsgroup.value_from(tsd)")
+plt.xlabel("Time (s)")
+plt.yticks([-1, 0, 1])
+plt.legend(bbox_to_anchor=(1.0, 0.5, 0.5, 0.5))
+plt.show()
+```
+
+### `threshold`
+
+The method `threshold` of `Tsd` returns a new `Tsd` with all the data above or
+below a certain threshold. Default is `above`. The time support
+of the new `Tsd` object gets updated accordingly.
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+tsd = nap.Tsd(t=np.arange(0, 100, 1), d=np.sin(np.arange(0, 10, 0.1)))
+```
+
+```{code-cell} ipython3
+tsd_above = tsd.threshold(0.5, method='above')
+```
+This method can be used to isolate epochs for which a signal
+is above/below a certain threshold.
+
+```{code-cell} ipython3
+epoch_above = tsd_above.time_support
+```
+```{code-cell} ipython3
+:tags: [hide-input]
+plt.figure()
+plt.plot(tsd, label="tsd")
+plt.plot(tsd_above, 'o-', label="tsd_above")
+[plt.axvspan(s, e, alpha=0.2) for s, e in epoch_above.values]
+plt.axhline(0.5, linewidth=0.5, color='grey')
+plt.legend()
+plt.xlabel("Time (s)")
+plt.title("tsd.threshold(0.5)")
+plt.show()
+```
+
+
+### Mapping between `TsGroup` and `Tsd`
+
+It is possible to transform a `TsGroup` to `Tsd` with the method
+`to_tsd` and a `Tsd` to `TsGroup` with the method `to_tsgroup`.
+
+This is useful to flatten the activity of a population in a single array.
+
+```{code-cell} ipython3
+tsd = tsgroup.to_tsd()
+
+print(tsd)
+```
+The object `tsd` contains all the timestamps of the `tsgroup` with
+the associated value being the index of the unit in the `TsGroup`.
+
+The method `to_tsgroup` converts the `Tsd` object back to the original `TsGroup`.
+
+```{code-cell} ipython3
+back_to_tsgroup = tsd.to_tsgroup()
+
+print(back_to_tsgroup)
+```
+
+#### Parameterizing a raster
+
+The method `to_tsd` makes it easier to display a raster plot.
+`TsGroup` object can be plotted with `plt.plot(tsgroup.to_tsd(), 'o')`.
+Timestamps can be mapped to any values passed directly to the method
+or by giving the name of a specific metadata name of the `TsGroup`.
+
+```{code-cell} ipython3
+tsgroup['label'] = np.arange(3)*np.pi
+
+print(tsgroup)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+plt.figure()
+plt.subplot(2,2,1)
+plt.plot(tsgroup.to_tsd(), '|')
+plt.title("tsgroup.to_tsd()")
+plt.xlabel("Time (s)")
+
+plt.subplot(2,2,2)
+plt.plot(tsgroup.to_tsd([10,20,30]), '|')
+plt.title("togroup.to_tsd([10,20,30])")
+plt.xlabel("Time (s)")
+
+plt.subplot(2,2,3)
+plt.plot(tsgroup.to_tsd("label"), '|')
+plt.title("togroup.to_tsd('label')")
+plt.xlabel("Time (s)")
+plt.tight_layout()
+plt.show()
+```
diff --git a/doc/user_guide/05_correlograms.md b/doc/user_guide/05_correlograms.md
new file mode 100644
index 00000000..7c4aeda5
--- /dev/null
+++ b/doc/user_guide/05_correlograms.md
@@ -0,0 +1,73 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+# Correlograms of discrete events
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+import pynapple as nap
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+Let's generate some data. Here we have two neurons recorded together. We can group them in a TsGroup.
+
+```{code-cell} ipython3
+ts1 = nap.Ts(t=np.sort(np.random.uniform(0, 1000, 2000)), time_units="s")
+ts2 = nap.Ts(t=np.sort(np.random.uniform(0, 1000, 1000)), time_units="s")
+epoch = nap.IntervalSet(start=0, end=1000, time_units="s")
+ts_group = nap.TsGroup({0: ts1, 1: ts2}, time_support=epoch)
+print(ts_group)
+```
+
+## Autocorrelograms
+
+We can compute their autocorrelograms, meaning the number of spikes of a neuron observed in a time window centered around its own spikes.
+For this we can use the function `compute_autocorrelogram`.
+We need to specify the `binsize` and `windowsize` to bin the spike train.
+
+```{code-cell} ipython3
+autocorrs = nap.compute_autocorrelogram(
+ group=ts_group, binsize=100, windowsize=1000, time_units="ms", ep=epoch # ms
+)
+print(autocorrs)
+```
+The variable `autocorrs` is a pandas DataFrame with the center of the bins
+for the index and each column is an autocorrelogram of one unit in the `TsGroup`.
+
+## Cross-correlograms
+
+Cross-correlograms are computed between pairs of neurons.
+
+```{code-cell} ipython3
+crosscorrs = nap.compute_crosscorrelogram(
+ group=ts_group, binsize=100, windowsize=1000, time_units="ms" # ms
+)
+print(crosscorrs)
+```
+
+Column name `(0, 1)` is read as cross-correlogram of neuron 0 and 1 with neuron 0 being the reference time.
+
+## Event-correlograms
+
+Event-correlograms count the number of events in the `TsGroup` based on an `event` timestamps object.
+
+```{code-cell} ipython3
+eventcorrs = nap.compute_eventcorrelogram(
+ group=ts_group, event = nap.Ts(t=[0, 10, 20]), binsize=0.1, windowsize=1
+ )
+print(eventcorrs)
+```
diff --git a/doc/user_guide/06_tuning_curves.md b/doc/user_guide/06_tuning_curves.md
new file mode 100644
index 00000000..7f7f5584
--- /dev/null
+++ b/doc/user_guide/06_tuning_curves.md
@@ -0,0 +1,368 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+# Tuning curves
+
+Pynapple can compute 1 dimension tuning curves
+(for example firing rate as a function of angular direction)
+and 2 dimension tuning curves (for example firing rate as a function
+of position). It can also compute average firing rate for different
+epochs (for example firing rate for different epochs of stimulus presentation).
+
+:::{important}
+If you are using calcium imaging data with the activity of the cell as a continuous transient, the function to call ends with `_continuous` for continuous time series (e.g. `compute_1d_tuning_curves_continuous`).
+:::
+
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+import pynapple as nap
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+from pprint import pprint
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+```{code-cell}
+:tags: [hide-cell]
+
+group = {
+ 0: nap.Ts(t=np.sort(np.random.uniform(0, 100, 10))),
+ 1: nap.Ts(t=np.sort(np.random.uniform(0, 100, 20))),
+ 2: nap.Ts(t=np.sort(np.random.uniform(0, 100, 30))),
+}
+
+tsgroup = nap.TsGroup(group)
+```
+
+## from epochs
+
+
+The epochs should be stored in a dictionary:
+```{code-cell} ipython3
+dict_ep = {
+ "stim0": nap.IntervalSet(start=0, end=20),
+ "stim1":nap.IntervalSet(start=30, end=70)
+ }
+```
+
+`nap.compute_discrete_tuning_curves` takes a `TsGroup` for spiking activity and a dictionary of epochs.
+The output is a pandas DataFrame where each column is a unit in the `TsGroup` and each row is one `IntervalSet` type.
+The value is the mean firing rate of the neuron during this set of intervals.
+
+```{code-cell} ipython3
+mean_fr = nap.compute_discrete_tuning_curves(tsgroup, dict_ep)
+
+pprint(mean_fr)
+```
+
+
+## from timestamps activity
+
+### 1-dimension tuning curves
+
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+
+from scipy.ndimage import gaussian_filter1d
+
+# Fake Tuning curves
+N = 6 # Number of neurons
+bins = np.linspace(0, 2*np.pi, 61)
+x = np.linspace(-np.pi, np.pi, len(bins)-1)
+tmp = np.roll(np.exp(-(1.5*x)**2), (len(bins)-1)//2)
+generative_tc = np.array([np.roll(tmp, i*(len(bins)-1)//N) for i in range(N)]).T
+
+# Feature
+T = 50000
+dt = 0.002
+timestep = np.arange(0, T)*dt
+feature = nap.Tsd(
+ t=timestep,
+ d=gaussian_filter1d(np.cumsum(np.random.randn(T)*0.5), 20)%(2*np.pi)
+ )
+index = np.digitize(feature, bins)-1
+
+# Spiking activity
+count = np.random.poisson(generative_tc[index])>0
+tsgroup = nap.TsGroup(
+ {i:nap.Ts(timestep[count[:,i]]) for i in range(N)},
+ time_support = nap.IntervalSet(0, 100)
+ )
+```
+
+Mandatory arguments are `TsGroup`, `Tsd` (or `TsdFrame` with 1 column only)
+and `nb_bins` for number of bins of the tuning curves.
+
+If an `IntervalSet` is passed with `ep`, everything is restricted to `ep`
+otherwise the time support of the feature is used.
+
+The min and max of the tuning curve are by default the min and max of the feature. This can be tweaked with the argument `minmax`.
+
+The output is a pandas DataFrame. Each column is a unit from `TsGroup` argument. The index of the DataFrame carries the center of the bin in feature space.
+
+```{code-cell} ipython3
+tuning_curve = nap.compute_1d_tuning_curves(
+ group=tsgroup,
+ feature=feature,
+ nb_bins=120,
+ minmax=(0, 2*np.pi)
+ )
+
+print(tuning_curve)
+```
+
+```{code-cell} ipython3
+plt.figure()
+plt.plot(tuning_curve)
+plt.xlabel("Feature space")
+plt.ylabel("Firing rate (Hz)")
+plt.show()
+```
+
+Internally, the function is calling the method `value_from` which maps each timestamp
+to its closest value in time from a `Tsd` object.
+It is then possible to validate the tuning curves by displaying the
+timestamps as well as their associated values.
+
+```{code-cell} ipython3
+:tags: [hide-input]
+plt.figure()
+plt.subplot(121)
+plt.plot(tsgroup[3].value_from(feature), 'o')
+plt.plot(feature, label="feature")
+plt.ylabel("Feature")
+plt.xlim(0, 2)
+plt.xlabel("Time (s)")
+plt.subplot(122)
+plt.plot(tuning_curve[3].values, tuning_curve[3].index.values, label="Tuning curve (unit=3)")
+plt.xlabel("Firing rate (Hz)")
+plt.legend()
+plt.show()
+```
+
+
+
+
+
+
+### 2-dimension tuning curves
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+dt = 0.01
+T = 10
+epoch = nap.IntervalSet(start=0, end=T, time_units="s")
+features = np.vstack((np.cos(np.arange(0, T, dt)), np.sin(np.arange(0, T, dt)))).T
+features = nap.TsdFrame(
+ t=np.arange(0, T, dt),
+ d=features,
+ time_units="s",
+ time_support=epoch,
+ columns=["a", "b"],
+)
+tsgroup = nap.TsGroup({
+ 0: nap.Ts(t=np.sort(np.random.uniform(0, T, 10))),
+ 1: nap.Ts(t=np.sort(np.random.uniform(0, T, 15))),
+ 2: nap.Ts(t=np.sort(np.random.uniform(0, T, 20))),
+}, time_support=epoch)
+```
+
+The `group` argument must be a `TsGroup` object.
+The `features` argument must be a 2-columns `TsdFrame` object.
+`nb_bins` can be an int or a tuple of 2 ints.
+
+```{code-cell} ipython3
+tcurves2d, binsxy = nap.compute_2d_tuning_curves(
+ group=tsgroup,
+ features=features,
+ nb_bins=(5,5),
+ minmax=(-1, 1, -1, 1)
+)
+pprint(tcurves2d)
+```
+
+`tcurves2d` is a dictionary with each key a unit in `TsGroup`. `binsxy` is a numpy array representing the centers of the bins and is useful for plotting tuning curves. Bins that have never been visited by the feature have been assigned a NaN value.
+
+Checking the accuracy of the tuning curves can be done by displaying the spikes aligned to the features with the function `value_from`, which assigns to each spike the corresponding feature value, here for unit 0.
+
+```{code-cell} ipython3
+ts_to_features = tsgroup[0].value_from(features)
+print(ts_to_features)
+```
+`tsgroup[0]`, which is a `Ts` object, has been transformed into a `TsdFrame` object, with each timestamp (spike time) being associated with a feature value.
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+
+plt.figure()
+plt.subplot(121)
+plt.plot(features["b"], features["a"], label="features")
+plt.plot(ts_to_features["b"], ts_to_features["a"], "o", color="red", markersize=4, label="spikes")
+plt.xlabel("feature b")
+plt.ylabel("feature a")
+[plt.axvline(b, linewidth=0.5, color='grey') for b in np.linspace(-1, 1, 6)]
+[plt.axhline(b, linewidth=0.5, color='grey') for b in np.linspace(-1, 1, 6)]
+plt.subplot(122)
+extents = (
+ np.min(features["a"]),
+ np.max(features["a"]),
+ np.min(features["b"]),
+ np.max(features["b"]),
+)
+plt.imshow(tcurves2d[0],
+ origin="lower", extent=extents, cmap="viridis",
+ aspect='auto'
+ )
+plt.title("Tuning curve unit 0")
+plt.xlabel("feature b")
+plt.ylabel("feature a")
+plt.grid(False)
+plt.colorbar()
+plt.tight_layout()
+plt.show()
+```
+
+
+## from continuous activity
+
+Tuning curves computed with the following functions are usually made with
+data from calcium imaging activities.
+
+### 1-dimension tuning curves
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+
+from scipy.ndimage import gaussian_filter1d
+
+# Fake Tuning curves
+N = 3 # Number of neurons
+bins = np.linspace(0, 2*np.pi, 61)
+x = np.linspace(-np.pi, np.pi, len(bins)-1)
+tmp = np.roll(np.exp(-(1.5*x)**2), (len(bins)-1)//2)
+generative_tc = np.array([np.roll(tmp, i*(len(bins)-1)//N) for i in range(N)]).T
+
+# Feature
+T = 50000
+dt = 0.002
+timestep = np.arange(0, T)*dt
+feature = nap.Tsd(
+ t=timestep,
+ d=gaussian_filter1d(np.cumsum(np.random.randn(T)*0.5), 20)%(2*np.pi)
+ )
+index = np.digitize(feature, bins)-1
+tmp = generative_tc[index]
+tmp = tmp + np.random.randn(*tmp.shape)*1
+# Calcium activity
+tsdframe = nap.TsdFrame(
+ t=timestep,
+ d=tmp
+ )
+```
+
+Arguments are `TsdFrame` (for example continuous calcium data), `Tsd` or `TsdFrame` for the 1-d feature and `nb_bins` for the number of bins.
+
+```{code-cell} ipython3
+
+tuning_curves = nap.compute_1d_tuning_curves_continuous(
+ tsdframe=tsdframe,
+ feature=feature,
+ nb_bins=120,
+ minmax=(0, 2*np.pi)
+ )
+
+print(tuning_curves)
+```
+
+```{code-cell} ipython3
+plt.figure()
+plt.plot(tuning_curves)
+plt.xlabel("Feature space")
+plt.ylabel("Mean activity")
+plt.show()
+```
+
+### 2-dimension tuning curves
+
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+
+dt = 0.01
+T = 10
+epoch = nap.IntervalSet(start=0, end=T, time_units="s")
+features = np.vstack((np.cos(np.arange(0, T, dt)), np.sin(np.arange(0, T, dt)))).T
+features = nap.TsdFrame(
+ t=np.arange(0, T, dt),
+ d=features,
+ time_units="s",
+ time_support=epoch,
+ columns=["a", "b"],
+)
+
+
+# Calcium activity
+tsdframe = nap.TsdFrame(
+ t=timestep,
+ d=np.random.randn(len(timestep), 2)
+ )
+```
+
+Arguments are `TsdFrame` (for example continuous calcium data), a 2-column `TsdFrame` for the 2-d features and `nb_bins` for the number of bins.
+
+```{code-cell} ipython3
+
+tuning_curves, xy = nap.compute_2d_tuning_curves_continuous(
+ tsdframe=tsdframe,
+ features=features,
+ nb_bins=5,
+ )
+
+print(tuning_curves)
+```
+
+```{code-cell} ipython3
+plt.figure()
+plt.subplot(121)
+plt.plot(features["b"], features["a"], label="features")
+plt.xlabel("feature b")
+plt.ylabel("feature a")
+[plt.axvline(b, linewidth=0.5, color='grey') for b in np.linspace(-1, 1, 6)]
+[plt.axhline(b, linewidth=0.5, color='grey') for b in np.linspace(-1, 1, 6)]
+plt.subplot(122)
+extents = (
+ np.min(features["a"]),
+ np.max(features["a"]),
+ np.min(features["b"]),
+ np.max(features["b"]),
+)
+plt.imshow(tuning_curves[0],
+ origin="lower", extent=extents, cmap="viridis",
+ aspect='auto'
+ )
+plt.title("Tuning curve unit 0")
+plt.xlabel("feature b")
+plt.ylabel("feature a")
+plt.grid(False)
+plt.colorbar()
+plt.tight_layout()
+plt.show()
+```
+
+
+
diff --git a/doc/user_guide/07_decoding.md b/doc/user_guide/07_decoding.md
new file mode 100644
index 00000000..88bbc5ca
--- /dev/null
+++ b/doc/user_guide/07_decoding.md
@@ -0,0 +1,221 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+# Decoding
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+import pynapple as nap
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+Pynapple supports 1-dimensional and 2-dimensional Bayesian decoding. The functions return the decoded feature as well as the probabilities for each timestamp.
+
+
+:::{hint}
+Inputs to the Bayesian decoding functions always include the tuning curves computed from `nap.compute_1d_tuning_curves` or `nap.compute_2d_tuning_curves`.
+:::
+
+## 1-dimensional decoding
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+
+from scipy.ndimage import gaussian_filter1d
+
+# Fake Tuning curves
+N = 6 # Number of neurons
+bins = np.linspace(0, 2*np.pi, 61)
+x = np.linspace(-np.pi, np.pi, len(bins)-1)
+tmp = np.roll(np.exp(-(1.5*x)**2), (len(bins)-1)//2)
+tc = np.array([np.roll(tmp, i*(len(bins)-1)//N) for i in range(N)]).T
+
+tc_1d = pd.DataFrame(index=bins[0:-1], data=tc)
+
+# Feature
+T = 10000
+dt = 0.01
+timestep = np.arange(0, T)*dt
+feature = nap.Tsd(
+ t=timestep,
+ d=gaussian_filter1d(np.cumsum(np.random.randn(T)*0.5), 20)%(2*np.pi)
+ )
+index = np.digitize(feature, bins)-1
+
+# Spiking activity
+
+count = np.random.poisson(tc[index])>0
+tsgroup = nap.TsGroup({i:nap.Ts(timestep[count[:,i]]) for i in range(N)})
+epoch = nap.IntervalSet(0, 10)
+```
+
+To decode, we need to compute tuning curves in 1D.
+
+```{code-cell} ipython3
+tcurves_1d = nap.compute_1d_tuning_curves(
+ tsgroup, feature, nb_bins=61, minmax=(0, 2 * np.pi)
+)
+```
+
+We can display the tuning curves of each neuron
+
+```{code-cell}
+:tags: [hide-input]
+
+plt.figure()
+plt.plot(tcurves_1d)
+plt.xlabel("Feature position")
+plt.ylabel("Rate (Hz)")
+plt.show()
+```
+
+`nap.decode_1d` performs Bayesian decoding:
+
+
+```{code-cell} ipython3
+decoded, proba_feature = nap.decode_1d(
+ tuning_curves=tcurves_1d , # 1D tuning curves
+ group=tsgroup, # Spiking activity
+ ep=epoch, # Small epoch
+ bin_size=0.06, # How to bin the spike trains
+ feature=feature, # Features
+)
+```
+
+`decoded` is a `Tsd` object containing the decoded feature value. `proba_feature` is a `TsdFrame` containing the probabilities of being in a particular feature bin over time.
+
+```{code-cell} ipython3
+:tags: [hide-input]
+plt.figure(figsize=(12, 6))
+plt.subplot(211)
+plt.plot(feature.restrict(epoch), label="True")
+plt.plot(decoded, label="Decoded")
+plt.legend()
+plt.xlim(epoch[0,0], epoch[0,1])
+plt.subplot(212)
+plt.imshow(proba_feature.values.T, aspect="auto", origin="lower", cmap="viridis")
+plt.xticks([0, len(decoded)], epoch.values[0])
+plt.xlabel("Time (s)")
+plt.show()
+
+```
+
+
+## 2-dimensional decoding
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+
+dt = 0.1
+epoch = nap.IntervalSet(start=0, end=1000, time_units="s")
+features = np.vstack((np.cos(np.arange(0, 1000, dt)), np.sin(np.arange(0, 1000, dt)))).T
+features = nap.TsdFrame(t=np.arange(0, 1000, dt),
+ d=features,
+ time_units="s",
+ time_support=epoch,
+ columns=["a", "b"],
+)
+
+times = features.as_units("us").index.values
+ft = features.values
+alpha = np.arctan2(ft[:, 1], ft[:, 0])
+bins = np.repeat(np.linspace(-np.pi, np.pi, 13)[::, np.newaxis], 2, 1)
+bins += np.array([-2 * np.pi / 24, 2 * np.pi / 24])
+ts_group = {}
+for i in range(12):
+ ts = times[(alpha >= bins[i, 0]) & (alpha <= bins[i + 1, 1])]
+ ts_group[i] = nap.Ts(ts, time_units="us")
+
+ts_group = nap.TsGroup(ts_group, time_support=epoch)
+
+```
+
+To decode, we need to compute tuning curves in 2D.
+
+
+```{code-cell} ipython3
+tcurves2d, binsxy = nap.compute_2d_tuning_curves(
+ group=ts_group, # Spiking activity of 12 neurons
+ features=features, # 2-dimensional features
+ nb_bins=10,
+ ep=epoch,
+ minmax=(-1.0, 1.0, -1.0, 1.0), # Minmax of the features
+)
+```
+
+We can display the tuning curves of each neuron
+
+```{code-cell}
+:tags: [hide-input]
+
+plt.figure(figsize=(20, 9))
+for i in ts_group.keys():
+ plt.subplot(2, 6, i + 1)
+ plt.imshow(
+ tcurves2d[i], extent=(binsxy[1][0], binsxy[1][-1], binsxy[0][0], binsxy[0][-1])
+ )
+ plt.xticks()
+plt.show()
+```
+
+`nap.decode_2d` performs Bayesian decoding:
+
+
+```{code-cell} ipython3
+decoded, proba_feature = nap.decode_2d(
+ tuning_curves=tcurves2d, # 2D tuning curves
+ group=ts_group, # Spiking activity
+ ep=epoch, # Epoch
+ bin_size=0.1, # How to bin the spike trains
+ xy=binsxy, # Features position
+ features=features, # Features
+)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+plt.figure(figsize=(15, 5))
+plt.subplot(131)
+plt.plot(features["a"].get(0,20), label="True")
+plt.plot(decoded["a"].get(0,20), label="Decoded")
+plt.legend()
+plt.xlabel("Time (s)")
+plt.ylabel("Feature a")
+plt.subplot(132)
+plt.plot(features["b"].get(0,20), label="True")
+plt.plot(decoded["b"].get(0,20), label="Decoded")
+plt.legend()
+plt.xlabel("Time (s)")
+plt.title("Feature b")
+plt.subplot(133)
+plt.plot(
+ features["a"].get(0,20),
+ features["b"].get(0,20),
+ label="True",
+)
+plt.plot(
+ decoded["a"].get(0,20),
+ decoded["b"].get(0,20),
+ label="Decoded",
+)
+plt.xlabel("Feature a")
+plt.title("Feature b")
+plt.legend()
+plt.tight_layout()
+plt.show()
+```
\ No newline at end of file
diff --git a/doc/user_guide/08_perievent.md b/doc/user_guide/08_perievent.md
new file mode 100644
index 00000000..d16a6a8f
--- /dev/null
+++ b/doc/user_guide/08_perievent.md
@@ -0,0 +1,137 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+# Perievent
+
+The perievent module allows re-centering time series and timestamp data around a particular event, as well as computing event (spike) triggered averages.
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+import pynapple as nap
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+## Peri-Event Time Histogram (PETH)
+
+
+```{code-cell} ipython3
+stim = nap.Tsd(
+ t=np.sort(np.random.uniform(0, 1000, 50)),
+ d=np.random.rand(50), time_units="s"
+)
+ts1 = nap.Ts(t=np.sort(np.random.uniform(0, 1000, 2000)), time_units="s")
+```
+
+The function `compute_perievent` aligns timestamps to a particular set of timestamps.
+
+```{code-cell} ipython3
+peth = nap.compute_perievent(
+ data=ts1,
+ tref=stim,
+ minmax=(-0.1, 0.2),
+ time_unit="s")
+
+print(peth)
+```
+
+The returned object is a `TsGroup`. The column `ref_times` is a
+metadata column that indicates the center timestamps.
+
+### Raster plot
+
+It is then easy to create a raster plot around the times of the
+stimulation event by calling the `to_tsd` function of pynapple
+to "flatten" the TsGroup `peth`.
+
+```{code-cell} ipython3
+plt.figure(figsize=(10, 6))
+plt.subplot(211)
+plt.plot(np.sum(peth.count(0.01), 1), linewidth=3, color="red")
+plt.xlim(-0.1, 0.2)
+plt.ylabel("Count")
+plt.axvline(0.0)
+plt.subplot(212)
+plt.plot(peth.to_tsd(), "|", markersize=20, color="red", mew=4)
+plt.xlabel("Time from stim (s)")
+plt.ylabel("Stimulus")
+plt.xlim(-0.1, 0.2)
+plt.axvline(0.0)
+```
+
+The same function can be applied to a group of neurons.
+In this case, it returns a dict of `TsGroup`
+
+## Event trigger average
+
+The function `compute_event_trigger_average` computes the average feature around a particular event time.
+
+```{code-cell}
+:tags: [hide-cell]
+
+group = {
+ 0: nap.Ts(t=np.sort(np.random.uniform(0, 100, 10))),
+ 1: nap.Ts(t=np.sort(np.random.uniform(0, 100, 20))),
+ 2: nap.Ts(t=np.sort(np.random.uniform(0, 100, 30))),
+}
+
+tsgroup = nap.TsGroup(group)
+```
+```{code-cell}
+eta = nap.compute_event_trigger_average(
+ group=tsgroup,
+ feature=stim,
+ binsize=0.1,
+ windowsize=(-1, 1))
+
+print(eta)
+```
+
+
+## Peri-Event continuous time series
+
+The function `nap.compute_perievent_continuous` aligns a time series of any dimension around events.
+
+```{code-cell}
+:tags: [hide-cell]
+
+features = nap.TsdFrame(t=np.arange(0, 100), d=np.random.randn(100,6))
+events = nap.Ts(t=np.sort(np.random.uniform(0, 100, 5)))
+```
+
+```{code-cell}
+
+perievent = nap.compute_perievent_continuous(
+ data=features,
+ tref=events,
+ minmax=(-1, 1))
+
+print(perievent)
+```
+
+The object `perievent` is now of shape (number of bins, (dimensions of input), number of events):
+
+
+```{code-cell}
+print(perievent.shape)
+```
+
+
+
+
+
+
+
diff --git a/doc/user_guide/09_randomization.md b/doc/user_guide/09_randomization.md
new file mode 100644
index 00000000..40b23be2
--- /dev/null
+++ b/doc/user_guide/09_randomization.md
@@ -0,0 +1,69 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+# Randomization
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+import pynapple as nap
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+Pynapple provides some ready-to-use randomization methods to compute null distributions for statistical testing.
+Different methods preserve or destroy different features of the data.
+
+## Shift timestamps
+
+`shift_timestamps` shifts all the timestamps in a `Ts` object by the same random amount, wrapping the end of the time support to its beginning. This randomization preserves the temporal structure in the data but destroys the temporal relationships with other quantities (e.g. behavioural data).
+When applied on a `TsGroup` object, each series in the group is shifted independently.
+
+
+```{code-cell} ipython3
+ts = nap.Ts(t=np.sort(np.random.uniform(0, 100, 10)), time_units="ms")
+rand_ts = nap.shift_timestamps(ts, min_shift=1, max_shift=20)
+```
+
+## Shuffle timestamp intervals
+
+`shuffle_ts_intervals` computes the intervals between consecutive timestamps, permutes them, and generates a new set of timestamps with the permuted intervals.
+This procedure preserves the distribution of intervals, but not their sequence.
+
+
+```{code-cell} ipython3
+ts = nap.Ts(t=np.sort(np.random.uniform(0, 100, 10)), time_units="s")
+rand_ts = nap.shuffle_ts_intervals(ts)
+```
+
+## Jitter timestamps
+
+`jitter_timestamps` shifts each timestamp in the data by an independent random amount. When applied with a small `max_jitter`, this procedure destroys the fine temporal structure of the data, while preserving structure on longer timescales.
+
+
+```{code-cell} ipython3
+ts = nap.Ts(t=np.sort(np.random.uniform(0, 100, 10)), time_units="s")
+rand_ts = nap.jitter_timestamps(ts, max_jitter=1)
+```
+
+## Resample timestamps
+
+`resample_timestamps` uniformly re-draws the same number of timestamps in `ts`, in the same time support. This procedure preserves the total number of timestamps, but destroys any other feature of the original data.
+
+
+```{code-cell} ipython3
+ts = nap.Ts(t=np.sort(np.random.uniform(0, 100, 10)), time_units="s")
+rand_ts = nap.resample_timestamps(ts)
+```
diff --git a/doc/user_guide/10_power_spectral_density.md b/doc/user_guide/10_power_spectral_density.md
new file mode 100644
index 00000000..84d23a87
--- /dev/null
+++ b/doc/user_guide/10_power_spectral_density.md
@@ -0,0 +1,141 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+Power spectral density
+======================
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+import pynapple as nap
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+
+***
+Generating a signal
+------------------
+Let's generate a dummy signal composed of 2 Hz and 10 Hz sinusoids with white noise.
+
+
+
+```{code-cell} ipython3
+F = [2, 10]
+
+Fs = 2000
+t = np.arange(0, 200, 1/Fs)
+sig = nap.Tsd(
+ t=t,
+ d=np.cos(t*2*np.pi*F[0])+np.cos(t*2*np.pi*F[1])+np.random.normal(0, 3, len(t)),
+ time_support = nap.IntervalSet(0, 200)
+ )
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+plt.figure()
+plt.plot(sig.get(0, 0.4))
+plt.title("Signal")
+plt.xlabel("Time (s)")
+```
+
+Computing power spectral density (PSD)
+--------------------------------------
+
+To compute a PSD of a signal, you can use the function `nap.compute_power_spectral_density`. With `norm=True`, the output of the FFT is divided by the length of the signal.
+
+
+```{code-cell} ipython3
+psd = nap.compute_power_spectral_density(sig, norm=True)
+```
+
+Pynapple returns a pandas DataFrame.
+
+
+```{code-cell} ipython3
+print(psd)
+```
+
+It is then easy to plot it.
+
+
+```{code-cell} ipython3
+plt.figure()
+plt.plot(np.abs(psd))
+plt.xlabel("Frequency (Hz)")
+plt.ylabel("Amplitude")
+```
+
+Note that the output of the FFT is truncated to positive frequencies. To get positive and negative frequencies, you can set `full_range=True`.
+By default, the function returns the frequencies up to the Nyquist frequency.
+Let's zoom on the first 20 Hz.
+
+
+```{code-cell} ipython3
+plt.figure()
+plt.plot(np.abs(psd))
+plt.xlabel("Frequency (Hz)")
+plt.ylabel("Amplitude")
+plt.xlim(0, 20)
+```
+
+We find the two frequencies 2 and 10 Hz.
+
+By default, pynapple assumes a constant sampling rate and a single epoch. For example, computing the FFT over more than 1 epoch will raise an error.
+
+
+```{code-cell} ipython3
+double_ep = nap.IntervalSet([0, 50], [20, 100])
+
+try:
+ nap.compute_power_spectral_density(sig, ep=double_ep)
+except ValueError as e:
+ print(e)
+```
+
+Computing mean PSD
+------------------
+
+It is possible to compute an average PSD over multiple epochs with the function `nap.compute_mean_power_spectral_density`.
+
+In this case, the argument `interval_size` determines the duration of each epochs upon which the FFT is computed.
+If no epochs are passed, the function will split the `time_support`.
+
+In this case, the FFT will be computed over epochs of 20 seconds.
+
+
+```{code-cell} ipython3
+mean_psd = nap.compute_mean_power_spectral_density(sig, interval_size=20.0, norm=True)
+```
+
+Let's compare `mean_psd` to `psd`. In both cases, the output is normalized.
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+plt.figure()
+plt.plot(np.abs(psd), label='PSD')
+plt.plot(np.abs(mean_psd), label='Mean PSD (20s)')
+plt.xlabel("Frequency (Hz)")
+plt.ylabel("Amplitude")
+plt.legend()
+plt.xlim(0, 15)
+```
+
+As we can see, `nap.compute_mean_power_spectral_density` was able to smooth out the noise.
diff --git a/doc/user_guide/11_wavelets.md b/doc/user_guide/11_wavelets.md
new file mode 100644
index 00000000..6116f20f
--- /dev/null
+++ b/doc/user_guide/11_wavelets.md
@@ -0,0 +1,574 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+Wavelet Transform
+=================
+
+This tutorial covers the use of `nap.compute_wavelet_transform` to do continuous wavelet transform. By default, pynapple uses Morlet wavelets.
+
+Wavelets are a great tool for capturing changes in the spectral characteristics of a signal over time. As neural signals change
+and develop over time, wavelet decompositions can aid both visualization and analysis.
+
+The function `nap.generate_morlet_filterbank` can help parametrize and visualize the Morlet wavelets.
+
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+import pynapple as nap
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+***
+Generating a Dummy Signal
+------------------
+Let's generate a dummy signal to analyse with wavelets!
+
+Our dummy dataset will contain two components, a low frequency 2Hz sinusoid combined
+with a sinusoid which increases frequency from 5 to 15 Hz throughout the signal.
+
+
+```{code-cell} ipython3
+Fs = 2000
+t = np.linspace(0, 5, Fs * 5)
+two_hz_phase = t * 2 * np.pi * 2
+two_hz_component = np.sin(two_hz_phase)
+increasing_freq_component = np.sin(t * (5 + t) * np.pi * 2)
+sig = nap.Tsd(
+ d=two_hz_component + increasing_freq_component + np.random.normal(0, 0.1, 10000),
+ t=t,
+)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+fig, ax = plt.subplots(3, constrained_layout=True, figsize=(10, 5))
+ax[0].plot(t, two_hz_component)
+ax[0].set_title("2Hz Component")
+ax[1].plot(t, increasing_freq_component)
+ax[1].set_title("Increasing Frequency Component")
+ax[2].plot(sig)
+ax[2].set_title("Dummy Signal")
+[ax[i].margins(0) for i in range(3)]
+[ax[i].set_ylim(-2.5, 2.5) for i in range(3)]
+[ax[i].set_xlabel("Time (s)") for i in range(3)]
+[ax[i].set_ylabel("Signal") for i in range(3)]
+```
+
+***
+Visualizing the Morlet Wavelets
+-------------------------------
+We will be decomposing our dummy signal using wavelets of different frequencies. These wavelets
+can be examined using the `generate_morlet_filterbank` function. Here we will use the default parameters
+to define a Morlet filter bank. This function is a good way to visually inspect the quality of the wavelets.
+
+
+```{code-cell} ipython3
+# Define the frequency of the wavelets in our filter bank
+freqs = np.linspace(1, 25, num=25)
+# Get the filter bank
+filter_bank = nap.generate_morlet_filterbank(
+ freqs, Fs, gaussian_width=1.5, window_length=1.0
+)
+
+print(filter_bank)
+```
+
+`filter_bank` is a `TsdFrame`.
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+def plot_filterbank(filter_bank, freqs, title):
+ fig, ax = plt.subplots(1, constrained_layout=True, figsize=(10, 7))
+ for f_i in range(filter_bank.shape[1]):
+ ax.plot(filter_bank[:, f_i].real() + f_i * 1.5)
+ ax.text(-6.8, 1.5 * f_i, f"{np.round(freqs[f_i], 2)}Hz", va="center", ha="left")
+
+ ax.set_yticks([])
+ ax.set_xlim(-5, 5)
+ ax.set_xlabel("Time (s)")
+ ax.set_title(title)
+
+
+title = "Morlet Wavelet Filter Bank (Real Components): gaussian_width=1.5, window_length=1.0"
+plot_filterbank(filter_bank, freqs, title)
+```
+
+***
+Parametrizing the wavelets
+--------------------------
+Let's visualize what changing our parameters does to the
+underlying wavelets.
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+window_lengths = [1.0, 3.0]
+gaussian_widths = [1.0, 3.0]
+colors = np.array([["r", "g"], ["b", "y"]])
+fig, ax = plt.subplots(
+ len(window_lengths) + 1,
+ len(gaussian_widths) + 1,
+ constrained_layout=True,
+ figsize=(10, 8),
+)
+for row_i, wl in enumerate(window_lengths):
+ for col_i, gw in enumerate(gaussian_widths):
+ wavelet = nap.generate_morlet_filterbank(
+ np.array([1.0]), 1000, gaussian_width=gw, window_length=wl, precision=12
+ )[:, 0].real()
+ ax[row_i, col_i].plot(wavelet, c=colors[row_i, col_i])
+ fft = nap.compute_power_spectral_density(wavelet)
+ for i, j in [(row_i, -1), (-1, col_i)]:
+ ax[i, j].plot(fft.abs(), c=colors[row_i, col_i])
+for i in range(len(window_lengths)):
+ for j in range(len(gaussian_widths)):
+ ax[i, j].set(xlabel="Time (s)", yticks=[])
+for ci, gw in enumerate(gaussian_widths):
+ ax[0, ci].set_title(f"gaussian_width={gw}", fontsize=10)
+for ri, wl in enumerate(window_lengths):
+ ax[ri, 0].set_ylabel(f"window_length={wl}", fontsize=10)
+fig.suptitle("Parametrization Visualization (1 Hz Wavelet)")
+ax[-1, -1].set_visible(False)
+for i in range(len(window_lengths)):
+ ax[-1, i].set(
+ xlim=(0, 2), yticks=[], ylabel="Frequency Response", xlabel="Frequency (Hz)"
+ )
+for i in range(len(gaussian_widths)):
+ ax[i, -1].set(
+ xlim=(0, 2), yticks=[], ylabel="Frequency Response", xlabel="Frequency (Hz)"
+ )
+```
+
+Increasing `window_length` increases the number of cycles present in the wavelet, and
+correspondingly increases the length of the time window that the wavelet covers.
+
+The `gaussian_width` parameter determines the shape of the gaussian window being convolved with the sinusoidal
+component of the wavelet.
+
+Both of these parameters can be tweaked to control for the trade-off between time resolution and frequency resolution.
+
+
+***
+Continuous wavelet transform
+----------------------------
+Here we will use the `compute_wavelet_transform` function to decompose our signal using the filter bank shown
+above. Wavelet decomposition breaks down a signal into its constituent wavelets, capturing both time and
+frequency information for analysis. We will calculate this decomposition and plot it's corresponding
+scalogram (which is another name for time frequency decomposition using wavelets).
+
+
+```{code-cell} ipython3
+# Compute the wavelet transform using the parameters above
+mwt = nap.compute_wavelet_transform(
+ sig, fs=Fs, freqs=freqs, gaussian_width=1.5, window_length=1.0
+)
+```
+
+`mwt` for Morlet wavelet transform is a `TsdFrame`. Each column is the result of the convolution of the signal with one wavelet.
+
+
+```{code-cell} ipython3
+print(mwt)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+def plot_timefrequency(freqs, powers, ax=None):
+ im = ax.imshow(np.abs(powers), aspect="auto")
+ ax.invert_yaxis()
+ ax.set_xlabel("Time (s)")
+ ax.set_ylabel("Frequency (Hz)")
+ ax.get_xaxis().set_visible(False)
+ ax.set(yticks=[np.argmin(np.abs(freqs - val)) for val in freqs[::2]], yticklabels=freqs[::2])
+ ax.grid(False)
+ return im
+
+
+fig = plt.figure(constrained_layout=True, figsize=(10, 6))
+fig.suptitle("Wavelet Decomposition")
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 0.3])
+
+ax0 = plt.subplot(gs[0, 0])
+im = plot_timefrequency(freqs[:], np.transpose(mwt[:, :].values), ax=ax0)
+cbar = fig.colorbar(im, ax=ax0, orientation="vertical")
+
+ax1 = plt.subplot(gs[1, 0])
+ax1.plot(sig)
+ax1.set_ylabel("Signal")
+ax1.set_xlabel("Time (s)")
+ax1.margins(0)
+```
+
+We can see that the decomposition has picked up on the 2Hz component of the signal, as well as the component with
+increasing frequency. In this section, we will extract just the 2Hz component from the wavelet decomposition,
+and see how it compares to the original section.
+
+
+```{code-cell} ipython3
+# Get the index of the 2Hz frequency
+two_hz_freq_idx = np.where(freqs == 2.0)[0]
+# The 2Hz component is the real component of the wavelet decomposition at this index
+slow_oscillation = np.real(mwt[:, two_hz_freq_idx])
+# The 2Hz wavelet phase is the angle of the wavelet decomposition at this index
+slow_oscillation_phase = np.angle(mwt[:, two_hz_freq_idx])
+```
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig = plt.figure(constrained_layout=True, figsize=(10, 4))
+axd = fig.subplot_mosaic(
+ [["signal"], ["phase"]],
+ height_ratios=[1, 0.4],
+)
+axd["signal"].plot(sig, label="Raw Signal", alpha=0.5)
+axd["signal"].plot(slow_oscillation, label="2Hz Reconstruction")
+axd["signal"].legend()
+axd["signal"].set_ylabel("Signal")
+
+axd["phase"].plot(slow_oscillation_phase, alpha=0.5)
+axd["phase"].set_ylabel("Phase (rad)")
+axd["phase"].set_xlabel("Time (s)")
+[axd[k].margins(0) for k in ["signal", "phase"]]
+```
+
+Let's see what happens if we also add the 15 Hz component of the wavelet decomposition to the reconstruction. We
+will extract the 15 Hz components, and also the 15Hz wavelet power over time. The wavelet power tells us to what
+extent the 15 Hz frequency is present in our signal at different times.
+
+Finally, we will add this 15 Hz reconstruction to the one shown above, to see if it improves our reconstructed
+signal.
+
+
+```{code-cell} ipython3
+# Get the index of the 15 Hz frequency
+fifteen_hz_freq_idx = np.where(freqs == 15.0)[0]
+# The 15 Hz component is the real component of the wavelet decomposition at this index
+fifteenHz_oscillation = np.real(mwt[:, fifteen_hz_freq_idx])
+# The 15 Hz power is the absolute value of the wavelet decomposition at this index
+fifteenHz_oscillation_power = np.abs(mwt[:, fifteen_hz_freq_idx])
+```
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig = plt.figure(constrained_layout=True, figsize=(10, 4))
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 1.0])
+
+ax0 = plt.subplot(gs[0, 0])
+ax0.plot(fifteenHz_oscillation, label="15Hz Reconstruction")
+ax0.plot(fifteenHz_oscillation_power, label="15Hz Power")
+ax0.set_xticklabels([])
+
+ax1 = plt.subplot(gs[1, 0])
+ax1.plot(sig, label="Raw Signal", alpha=0.5)
+ax1.plot(
+ slow_oscillation + fifteenHz_oscillation.values, label="2Hz + 15Hz Reconstruction"
+)
+ax1.set_xlabel("Time (s)")
+
+[
+ (a.margins(0), a.legend(), a.set_ylim(-2.5, 2.5), a.set_ylabel("Signal"))
+ for a in [ax0, ax1]
+]
+```
+
+We will now learn how to interpret the parameters of the wavelet, and in particular how to trade off the
+accuracy in the frequency decomposition with the accuracy in the time domain reconstruction;
+
+Up to this point we have used default wavelet and normalization parameters.
+
+Let's now add together the real components of all frequency bands to recreate a version of the original signal.
+
+
+```{code-cell} ipython3
+
+
+combined_oscillations = np.real(np.sum(mwt, axis=1))
+```
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig, ax = plt.subplots(1, constrained_layout=True, figsize=(10, 4))
+ax.plot(sig, alpha=0.5, label="Signal")
+ax.plot(combined_oscillations, label="Wavelet Reconstruction", alpha=0.5)
+ax.set_xlabel("Time (s)")
+ax.set_ylabel("Signal")
+ax.set_title("Wavelet Reconstruction of Signal")
+ax.set_ylim(-6, 6)
+ax.margins(0)
+ax.legend()
+```
+
+Our reconstruction seems to get the amplitude modulations of our signal correct, but the amplitude is overestimated,
+in particular towards the end of the time period. Often, this is due to a suboptimal choice of parameters, which
+can lead to a low spatial or temporal resolution.
+
+
+
+***
+Effect of `gaussian_width`
+------------------
+Let's increase `gaussian_width` to 7.5 and see the effect on the resultant filter bank.
+
+
+```{code-cell} ipython3
+freqs = np.linspace(1, 25, num=25)
+filter_bank = nap.generate_morlet_filterbank(
+ freqs, 1000, gaussian_width=7.5, window_length=1.0
+)
+
+plot_filterbank(
+ filter_bank,
+ freqs,
+ "Morlet Wavelet Filter Bank (Real Components): gaussian_width=7.5, window_length=1.0",
+)
+```
+
+***
+Let's see what effect this has on the Wavelet Scalogram which is generated...
+
+
+```{code-cell} ipython3
+mwt = nap.compute_wavelet_transform(
+ sig, fs=Fs, freqs=freqs, gaussian_width=7.5, window_length=1.0
+)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+
+fig = plt.figure(constrained_layout=True, figsize=(10, 6))
+fig.suptitle("Wavelet Decomposition")
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 0.3])
+
+ax0 = plt.subplot(gs[0, 0])
+im = plot_timefrequency(freqs[:], np.transpose(mwt[:, :].values), ax=ax0)
+cbar = fig.colorbar(im, ax=ax0, orientation="vertical")
+
+ax1 = plt.subplot(gs[1, 0])
+ax1.plot(sig)
+ax1.set_ylabel("Signal")
+ax1.set_xlabel("Time (s)")
+ax1.margins(0)
+```
+
+***
+And let's see if that has an effect on the reconstructed version of the signal
+
+
+```{code-cell} ipython3
+combined_oscillations = np.real(np.sum(mwt, axis=1))
+```
+
+Let's plot it.
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig, ax = plt.subplots(1, constrained_layout=True, figsize=(10, 4))
+ax.plot(sig, alpha=0.5, label="Signal")
+ax.plot(t, combined_oscillations, label="Wavelet Reconstruction", alpha=0.5)
+[ax.spines[sp].set_visible(False) for sp in ["right", "top"]]
+ax.set_xlabel("Time (s)")
+ax.set_ylabel("Signal")
+ax.set_title("Wavelet Reconstruction of Signal")
+ax.set_ylim(-6, 6)
+ax.margins(0)
+ax.legend()
+```
+
+There's a small improvement, but perhaps we can do better.
+
+
++++
+
+***
+Effect of `window_length`
+------------------
+Let's increase `window_length` to 2.0 and see the effect on the resultant filter bank.
+
+
+```{code-cell} ipython3
+freqs = np.linspace(1, 25, num=25)
+filter_bank = nap.generate_morlet_filterbank(
+ freqs, 1000, gaussian_width=7.5, window_length=2.0
+)
+
+plot_filterbank(
+ filter_bank,
+ freqs,
+    "Morlet Wavelet Filter Bank (Real Components): gaussian_width=7.5, window_length=2.0",
+)
+```
+
+***
+Let's see what effect this has on the Wavelet Scalogram which is generated...
+
+
+```{code-cell} ipython3
+mwt = nap.compute_wavelet_transform(
+ sig, fs=Fs, freqs=freqs, gaussian_width=7.5, window_length=2.0
+)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig = plt.figure(constrained_layout=True, figsize=(10, 6))
+fig.suptitle("Wavelet Decomposition")
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 0.3])
+
+ax0 = plt.subplot(gs[0, 0])
+im = plot_timefrequency(freqs[:], np.transpose(mwt[:, :].values), ax=ax0)
+cbar = fig.colorbar(im, ax=ax0, orientation="vertical")
+
+ax1 = plt.subplot(gs[1, 0])
+ax1.plot(sig)
+ax1.set_ylabel("Signal")
+ax1.set_xlabel("Time (s)")
+ax1.margins(0)
+```
+
+***
+And let's see if that has an effect on the reconstructed version of the signal
+
+
+```{code-cell} ipython3
+combined_oscillations = np.real(np.sum(mwt, axis=1))
+```
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig, ax = plt.subplots(1, constrained_layout=True, figsize=(10, 4))
+ax.plot(sig, alpha=0.5, label="Signal")
+ax.plot(combined_oscillations, label="Wavelet Reconstruction", alpha=0.5)
+ax.set_xlabel("Time (s)")
+ax.set_ylabel("Signal")
+ax.set_title("Wavelet Reconstruction of Signal")
+ax.margins(0)
+ax.set_ylim(-6, 6)
+ax.legend()
+```
+
+***
+Effect of L1 vs L2 normalization
+------------------
+`compute_wavelet_transform` contains two options for normalization: L1 and L2.
+By default, L1 is used as it creates cleaner looking decomposition images.
+
+L1 normalization often increases the contrast between significant and insignificant coefficients.
+This can result in a sharper and more defined visual representation, making patterns and structures within
+the signal more evident.
+
+L2 normalization is directly related to the energy of the signal. By normalizing using the
+L2 norm, you ensure that the transformed coefficients preserve the energy distribution of the original signal.
+
+Let's compare two wavelet decompositions, each generated with a different normalization strategy.
+
+
+```{code-cell} ipython3
+mwt_l1 = nap.compute_wavelet_transform(
+ sig, fs=Fs, freqs=freqs,
+ gaussian_width=7.5, window_length=2.0,
+ norm="l1"
+)
+mwt_l2 = nap.compute_wavelet_transform(
+ sig, fs=Fs, freqs=freqs,
+ gaussian_width=7.5, window_length=2.0,
+ norm="l2"
+)
+```
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig = plt.figure(constrained_layout=True, figsize=(10, 6))
+fig.suptitle("Wavelet Decomposition - L1 Normalization")
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 0.3])
+ax0 = plt.subplot(gs[0, 0])
+im = plot_timefrequency(freqs[:], np.transpose(mwt_l1[:, :].values), ax=ax0)
+cbar = fig.colorbar(im, ax=ax0, orientation="vertical")
+ax1 = plt.subplot(gs[1, 0])
+ax1.plot(sig)
+ax1.set_ylabel("Signal")
+ax1.set_xlabel("Time (s)")
+ax1.margins(0)
+
+fig = plt.figure(constrained_layout=True, figsize=(10, 6))
+fig.suptitle("Wavelet Decomposition - L2 Normalization")
+gs = plt.GridSpec(2, 1, figure=fig, height_ratios=[1.0, 0.3])
+ax0 = plt.subplot(gs[0, 0])
+im = plot_timefrequency(freqs[:], np.transpose(mwt_l2[:, :].values), ax=ax0)
+cbar = fig.colorbar(im, ax=ax0, orientation="vertical")
+ax1 = plt.subplot(gs[1, 0])
+ax1.plot(sig)
+ax1.set_ylabel("Signal")
+ax1.set_xlabel("Time (s)")
+ax1.margins(0)
+```
+
+We see that the L1 normalized image contains a visually clearer image; the 5-15 Hz component of the signal is
+as powerful as the 2 Hz component, so it makes sense that they should be shown with the same power in the scalogram.
+Let's reconstruct the signal using both decompositions and see the resulting reconstruction...
+
+
+```{code-cell} ipython3
+combined_oscillations_l1 = np.real(np.sum(mwt_l1, axis=1))
+combined_oscillations_l2 = np.real(np.sum(mwt_l2, axis=1))
+```
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig, ax = plt.subplots(1, constrained_layout=True, figsize=(10, 4))
+ax.plot(sig, label="Signal", linewidth=3, alpha=0.6, c="b")
+ax.plot(combined_oscillations_l1, label="Wavelet Reconstruction (L1)", c="g", alpha=0.6)
+ax.plot(combined_oscillations_l2, label="Wavelet Reconstruction (L2)", c="r", alpha=0.6)
+ax.set_xlabel("Time (s)")
+ax.set_ylabel("Signal")
+ax.set_title("Wavelet Reconstruction of Signal")
+ax.margins(0)
+ax.set_ylim(-6, 6)
+ax.legend()
+```
+
+We see that the reconstruction from the L2 normalized decomposition matched the original signal much more closely;
+this is due to the fact that L2 normalization preserved the energy of the original signal in its reconstruction.
+
+:::{card}
+Authors
+^^^
+[Kipp Freud](https://kippfreud.com/)
+
+Guillaume Viejo
+
+:::
+
diff --git a/doc/user_guide/12_filtering.md b/doc/user_guide/12_filtering.md
new file mode 100644
index 00000000..37f46615
--- /dev/null
+++ b/doc/user_guide/12_filtering.md
@@ -0,0 +1,384 @@
+---
+jupytext:
+ text_representation:
+ extension: .md
+ format_name: myst
+ format_version: 0.13
+ jupytext_version: 1.16.4
+kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+Filtering
+=========
+
+The filtering module holds the functions for frequency manipulation:
+
+- `nap.apply_bandstop_filter`
+- `nap.apply_lowpass_filter`
+- `nap.apply_highpass_filter`
+- `nap.apply_bandpass_filter`
+
+The functions have similar calling signatures. For example, to filter a 1000 Hz signal between
+10 and 20 Hz using a Butterworth filter:
+
+```
+>>> new_tsd = nap.apply_bandpass_filter(tsd, (10, 20), fs=1000, mode='butter')
+```
+
+Currently, the filtering module provides two methods for frequency manipulation: `butter`
+for a recursive Butterworth filter and `sinc` for a Windowed-sinc convolution. This notebook provides
+a comparison of the two methods.
+
+
+```{code-cell} ipython3
+:tags: [hide-cell]
+import pynapple as nap
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+custom_params = {"axes.spines.right": False, "axes.spines.top": False}
+sns.set_theme(style="ticks", palette="colorblind", font_scale=1.5, rc=custom_params)
+```
+
+***
+Basics
+------
+
+We start by generating a signal with multiple frequencies (2, 10 and 50 Hz).
+
+
+```{code-cell} ipython3
+fs = 1000 # sampling frequency
+t = np.linspace(0, 2, fs * 2)
+f2 = np.cos(t*2*np.pi*2)
+f10 = np.cos(t*2*np.pi*10)
+f50 = np.cos(t*2*np.pi*50)
+
+sig = nap.Tsd(t=t,d=f2+f10+f50 + np.random.normal(0, 0.5, len(t)))
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig = plt.figure(figsize = (15, 5))
+plt.plot(sig)
+plt.xlabel("Time (s)")
+```
+
+We can compute the Fourier transform of `sig` to verify that all the frequencies are there.
+
+
+```{code-cell} ipython3
+psd = nap.compute_power_spectral_density(sig, fs, norm=True)
+```
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig = plt.figure(figsize = (15, 5))
+plt.plot(np.abs(psd))
+plt.xlabel("Frequency (Hz)")
+plt.ylabel("Amplitude")
+plt.xlim(0, 100)
+```
+
+Let's say we would like to see only the 10 Hz component.
+We can use the function `apply_bandpass_filter` with mode `butter` for Butterworth.
+
+
+```{code-cell} ipython3
+sig_butter = nap.apply_bandpass_filter(sig, (8, 12), fs, mode='butter')
+```
+
+Let's compare it to the `sinc` mode for Windowed-sinc.
+
+
+```{code-cell} ipython3
+sig_sinc = nap.apply_bandpass_filter(sig, (8, 12), fs, mode='sinc', transition_bandwidth=0.003)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+fig = plt.figure(figsize = (15, 5))
+plt.subplot(211)
+plt.plot(t, f10, '-', color = 'gray', label = "10 Hz component")
+plt.xlim(0, 1)
+plt.legend()
+plt.subplot(212)
+# plt.plot(sig, alpha=0.5)
+plt.plot(sig_butter, label = "Butterworth")
+plt.plot(sig_sinc, '--', label = "Windowed-sinc")
+plt.legend()
+plt.xlabel("Time (s)")
+plt.xlim(0, 1)
+```
+
+This gives similar results except at the edges.
+
+Another use of filtering is to remove some frequencies. Here we can try to remove
+the 50 Hz component in the signal.
+
+
+```{code-cell} ipython3
+sig_butter = nap.apply_bandstop_filter(sig, cutoff=(45, 55), fs=fs, mode='butter')
+sig_sinc = nap.apply_bandstop_filter(sig, cutoff=(45, 55), fs=fs, mode='sinc', transition_bandwidth=0.004)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig = plt.figure(figsize = (15, 5))
+plt.subplot(211)
+plt.plot(t, sig, '-', color = 'gray', label = "Original signal")
+plt.xlim(0, 1)
+plt.legend()
+plt.subplot(212)
+plt.plot(sig_butter, label = "Butterworth")
+plt.plot(sig_sinc, '--', label = "Windowed-sinc")
+plt.legend()
+plt.xlabel("Time (s)")
+plt.xlim(0, 1)
+```
+
+Let's see what frequencies remain:
+
+
+```{code-cell} ipython3
+psd_butter = nap.compute_power_spectral_density(sig_butter, fs, norm=True)
+psd_sinc = nap.compute_power_spectral_density(sig_sinc, fs, norm=True)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig = plt.figure(figsize = (10, 5))
+plt.plot(np.abs(psd_butter), label = "Butterworth filter")
+plt.plot(np.abs(psd_sinc), label = "Windowed-sinc convolution")
+plt.legend()
+plt.xlabel("Frequency (Hz)")
+plt.ylabel("Amplitude")
+plt.xlim(0, 70)
+```
+
+***
+Inspecting frequency responses of a filter
+------------------------------------------
+
+We can inspect the frequency response of a filter by plotting its power spectral density (PSD).
+To do this, we can use the `get_filter_frequency_response` function, which returns a pandas Series with the frequencies
+as the index and the PSD as values.
+
+Let's extract the frequency response of a Butterworth filter and a sinc low-pass filter.
+
+
+```{code-cell} ipython3
+# compute the frequency response of the filters
+psd_butter = nap.get_filter_frequency_response(
+ 200, fs,"lowpass", "butter", order=8
+)
+psd_sinc = nap.get_filter_frequency_response(
+ 200, fs,"lowpass", "sinc", transition_bandwidth=0.1
+)
+```
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+# compute the transition bandwidth
+tb_butter = psd_butter[psd_butter > 0.99].index.max(), psd_butter[psd_butter < 0.01].index.min()
+tb_sinc = psd_sinc[psd_sinc > 0.99].index.max(), psd_sinc[psd_sinc < 0.01].index.min()
+
+fig, axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(15, 5))
+fig.suptitle("Frequency response", fontsize="x-large")
+axs[0].set_title("Butterworth Filter")
+axs[0].plot(psd_butter)
+axs[0].axvspan(0, tb_butter[0], alpha=0.4, color="green", label="Pass Band")
+axs[0].axvspan(*tb_butter, alpha=0.4, color="orange", label="Transition Band")
+axs[0].axvspan(tb_butter[1], 500, alpha=0.4, color="red", label="Stop Band")
+axs[0].legend().get_frame().set_alpha(1.)
+axs[0].set_xlim(0, 500)
+axs[0].set_xlabel("Frequency (Hz)")
+axs[0].set_ylabel("Amplitude")
+
+axs[1].set_title("Sinc Filter")
+axs[1].plot(psd_sinc)
+axs[1].axvspan(0, tb_sinc[0], alpha=0.4, color="green", label="Pass Band")
+axs[1].axvspan(*tb_sinc, alpha=0.4, color="orange", label="Transition Band")
+axs[1].axvspan(tb_sinc[1], 500, alpha=0.4, color="red", label="Stop Band")
+axs[1].legend().get_frame().set_alpha(1.)
+axs[1].set_xlabel("Frequency (Hz)")
+
+print(f"Transition band butterworth filter: ({int(tb_butter[0])}Hz, {int(tb_butter[1])}Hz)")
+print(f"Transition band sinc filter: ({int(tb_sinc[0])}Hz, {int(tb_sinc[1])}Hz)")
+```
+
+The frequency band with response close to one will be preserved by the filtering (pass band),
+the band with response close to zero will be discarded (stop band), and the band in between will be partially attenuated
+(transition band).
+
+
+:::{note}
+Here, we define the transition band as the range where the amplitude attenuation is between 99% and 1%.
+The `transition_bandwidth` parameter of the sinc filter is approximately the width of the transition band normalized by the sampling frequency. In the example above, if you divide the transition band width of 122Hz by the sampling frequency of 1000Hz, you get 0.122, which is close to the 0.1 value set.
+:::
+
+You can modulate the width of the transition band by setting the `order` parameter of the Butterworth filter
+or the `transition_bandwidth` parameter of the sinc filter.
+First, let's get the frequency response for a Butterworth low pass filter with different order:
+
+
+```{code-cell} ipython3
+butter_freq = {
+ order: nap.get_filter_frequency_response(250, fs, "lowpass", "butter", order=order)
+ for order in [2, 4, 6]}
+```
+
+... and then the frequency response for the Windowed-sinc equivalent with different transition bandwidth.
+
+
+```{code-cell} ipython3
+sinc_freq = {
+ tb: nap.get_filter_frequency_response(250, fs,"lowpass", "sinc", transition_bandwidth=tb)
+ for tb in [0.002, 0.02, 0.2]}
+```
+
+Let's plot the frequency response of both.
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig = plt.figure(figsize = (20, 10))
+gs = plt.GridSpec(2, 2)
+for order in butter_freq.keys():
+ plt.subplot(gs[0, 0])
+ plt.plot(butter_freq[order], label = f"order={order}")
+ plt.ylabel('Amplitude')
+ plt.legend()
+ plt.title("Butterworth recursive")
+ plt.subplot(gs[1, 0])
+ plt.plot(20*np.log10(butter_freq[order]), label = f"order={order}")
+ plt.xlabel('Frequency [Hz]')
+ plt.ylabel('Amplitude [dB]')
+ plt.ylim(-200,20)
+ plt.legend()
+
+for tb in sinc_freq.keys():
+ plt.subplot(gs[0, 1])
+ plt.plot(sinc_freq[tb], linewidth=2, label= f"width={tb}")
+ plt.ylabel('Amplitude')
+ plt.legend()
+ plt.title("Windowed-sinc conv.")
+ plt.subplot(gs[1, 1])
+ plt.plot(20*np.log10(sinc_freq[tb]), label= f"width={tb}")
+ plt.xlabel('Frequency [Hz]')
+ plt.ylabel('Amplitude [dB]')
+ plt.ylim(-200,20)
+ plt.legend()
+```
+
+
+:::{warning}
+In some cases, a transition bandwidth that is too high generates a kernel that is too short.
+The amplitude of the original signal will then be lower than expected.
+In this case, the solution is to decrease the transition bandwidth when using the windowed-sinc mode.
+Note that this increases the length of the kernel significantly.
+Let's see it with the band pass filter.
+:::
+
+
+```{code-cell} ipython3
+sinc_freq = {
+ tb:nap.get_filter_frequency_response((100, 200), fs, "bandpass", "sinc", transition_bandwidth=tb)
+ for tb in [0.004, 0.2]}
+```
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+fig = plt.figure()
+for tb in sinc_freq.keys():
+ plt.plot(sinc_freq[tb], linewidth=2, label= f"width={tb}")
+ plt.ylabel('Amplitude')
+ plt.legend()
+ plt.title("Windowed-sinc conv.")
+ plt.legend()
+```
+
+***
+Performances
+------------
+Let's compare the performance of each when varying the number of time points and the number of dimensions.
+
+
+```{code-cell} ipython3
+:tags: [hide-input]
+
+from time import perf_counter
+
+def get_mean_perf(tsd, mode, n=10):
+ tmp = np.zeros(n)
+ for i in range(n):
+ t1 = perf_counter()
+ _ = nap.apply_lowpass_filter(tsd, 0.25 * tsd.rate, mode=mode)
+ t2 = perf_counter()
+ tmp[i] = t2 - t1
+ return [np.mean(tmp), np.std(tmp)]
+
+def benchmark_time_points(mode):
+ times = []
+ for T in np.arange(1000, 100000, 20000):
+ time_array = np.arange(T)/1000
+ data_array = np.random.randn(len(time_array))
+ startend = np.linspace(0, time_array[-1], T//100).reshape(T//200, 2)
+ ep = nap.IntervalSet(start=startend[::2,0], end=startend[::2,1])
+ tsd = nap.Tsd(t=time_array, d=data_array, time_support=ep)
+ times.append([T]+get_mean_perf(tsd, mode))
+ return np.array(times)
+
+def benchmark_dimensions(mode):
+ times = []
+ for n in np.arange(1, 100, 10):
+ time_array = np.arange(10000)/1000
+ data_array = np.random.randn(len(time_array), n)
+ startend = np.linspace(0, time_array[-1], 10000//100).reshape(10000//200, 2)
+ ep = nap.IntervalSet(start=startend[::2,0], end=startend[::2,1])
+ tsd = nap.TsdFrame(t=time_array, d=data_array, time_support=ep)
+ times.append([n]+get_mean_perf(tsd, mode))
+ return np.array(times)
+
+
+times_sinc = benchmark_time_points(mode="sinc")
+times_butter = benchmark_time_points(mode="butter")
+
+dims_sinc = benchmark_dimensions(mode="sinc")
+dims_butter = benchmark_dimensions(mode="butter")
+
+
+plt.figure(figsize = (16, 5))
+plt.subplot(121)
+for arr, label in zip(
+ [times_sinc, times_butter],
+ ["Windowed-sinc", "Butter"],
+ ):
+ plt.plot(arr[:, 0], arr[:, 1], "o-", label=label)
+ plt.fill_between(arr[:, 0], arr[:, 1] - arr[:, 2], arr[:, 1] + arr[:, 2], alpha=0.2)
+plt.legend()
+plt.xlabel("Number of time points")
+plt.ylabel("Time (s)")
+plt.title("Low pass filtering benchmark")
+plt.subplot(122)
+for arr, label in zip(
+ [dims_sinc, dims_butter],
+ ["Windowed-sinc", "Butter"],
+ ):
+ plt.plot(arr[:, 0], arr[:, 1], "o-", label=label)
+ plt.fill_between(arr[:, 0], arr[:, 1] - arr[:, 2], arr[:, 1] + arr[:, 2], alpha=0.2)
+plt.legend()
+plt.xlabel("Number of dimensions")
+plt.ylabel("Time (s)")
+plt.title("Low pass filtering benchmark")
+```
diff --git a/docs/api_guide/tutorial_pynapple_core.py b/docs/api_guide/tutorial_pynapple_core.py
index d9639a76..cd953366 100644
--- a/docs/api_guide/tutorial_pynapple_core.py
+++ b/docs/api_guide/tutorial_pynapple_core.py
@@ -70,7 +70,7 @@
# Interval Sets object
# --------------------
#
-# The [IntervalSet](https://pynapple-org.github.io/pynapple/reference/core/interval_set/) object stores multiple epochs with a common time unit. It can then be used to restrict time series to this particular set of epochs.
+# The [IntervalSet](../../../reference/core/interval_set/) object stores multiple epochs with a common time unit. It can then be used to restrict time series to this particular set of epochs.
epochs = nap.IntervalSet(start=[0, 10], end=[5, 15], time_units="s")
@@ -82,7 +82,7 @@
print(new_tsd)
# %%
-# Multiple operations are available for IntervalSet. For example, IntervalSet can be merged. See the full documentation of the class [here](https://pynapple-org.github.io/pynapple/reference/core/interval_set/#pynapple.core.interval_set.IntervalSet.intersect) for a list of all the functions that can be used to manipulate IntervalSets.
+# Multiple operations are available for IntervalSet. For example, IntervalSet can be merged. See the full documentation of the class [here](../../../reference/core/interval_set/#pynapple.core.interval_set.IntervalSet.intersect) for a list of all the functions that can be used to manipulate IntervalSets.
epoch1 = nap.IntervalSet(start=0, end=10) # no time units passed. Default is us.
@@ -132,7 +132,7 @@
print(count)
# %%
-# One advantage of grouping time series is that metainformation can be added directly on an element-wise basis. In this case, we add labels to each Ts object when instantiating the group and after. We can then use this label to split the group. See the [TsGroup](https://pynapple-org.github.io/pynapple/reference/core/ts_group/) documentation for a complete methodology for splitting TsGroup objects.
+# One advantage of grouping time series is that metainformation can be added directly on an element-wise basis. In this case, we add labels to each Ts object when instantiating the group and after. We can then use this label to split the group. See the [TsGroup](../../../reference/core/ts_group/) documentation for a complete methodology for splitting TsGroup objects.
#
# First we create a pandas Series for the label.
diff --git a/docs/api_guide/tutorial_pynapple_io.py b/docs/api_guide/tutorial_pynapple_io.py
index b4047447..78f820bb 100644
--- a/docs/api_guide/tutorial_pynapple_io.py
+++ b/docs/api_guide/tutorial_pynapple_io.py
@@ -1,5 +1,3 @@
-# coding: utf-8
-
"""
IO Tutorial
===========
diff --git a/docs/api_guide/tutorial_pynapple_quick_start.py b/docs/api_guide/tutorial_pynapple_quick_start.py
index c03d905f..48e6608f 100644
--- a/docs/api_guide/tutorial_pynapple_quick_start.py
+++ b/docs/api_guide/tutorial_pynapple_quick_start.py
@@ -7,15 +7,15 @@
Preprocessing of the data was made with [Kilosort 2.0](https://github.com/MouseLand/Kilosort) and spike sorting was made with [Klusters](https://neurosuite.sourceforge.net/).
-Instructions for installing pynapple can be found [here](https://pynapple-org.github.io/pynapple/#installation).
+Instructions for installing pynapple can be found [here](../../../installation).
***
This notebook is meant to provide an overview of pynapple by going through:
-- **Input output (IO)**. In this case, pynapple will load a NWB file using the [NWBFile object](https://pynapple-org.github.io/pynapple/reference/io/interface_nwb/#pynapple.io.interface_nwb.NWBFile) within a project [Folder](https://pynapple-org.github.io/pynapple/reference/io/folder/) that represent a dataset.
-- **Core functions** that handle time series, interval sets and groups of time series. See this [notebook](https://pynapple-org.github.io/pynapple/generated/api_guide/tutorial_pynapple_core/) for a detailled usage of the core functions.
-- **Process functions**. A small collection of high-level functions widely used in system neuroscience. This [notebook](https://pynapple-org.github.io/pynapple/generated/api_guide/tutorial_pynapple_process) details those functions.
+- **Input output (IO)**. In this case, pynapple will load a NWB file using the [NWBFile object](../../../reference/io/interface_nwb/#pynapple.io.interface_nwb.NWBFile) within a project [Folder](../../../reference/io/folder/) that represents a dataset.
+- **Core functions** that handle time series, interval sets and groups of time series. See this [notebook](../../api_guide/tutorial_pynapple_core/) for a detailed usage of the core functions.
+- **Process functions**. A small collection of high-level functions widely used in system neuroscience. This [notebook](../../api_guide/tutorial_pynapple_process) details those functions.
"""
@@ -63,13 +63,13 @@
# %%
-# We can load the session with the function [load_folder](https://pynapple-org.github.io/pynapple/reference/io/misc/#pynapple.io.misc.load_folder). Pynapple will walks throught the folder and collects every subfolders.
+# We can load the session with the function [load_folder](../../../reference/io/misc/#pynapple.io.misc.load_folder). Pynapple will walk through the folder and collect every subfolder.
# We can use the attribute `view` or the function `expand` to display a tree view of the dataset. The treeview shows all the compatible data format (i.e npz files or NWBs files) and their equivalent pynapple type.
data = nap.load_folder(DATA_DIRECTORY)
data.view
# %%
-# The object `data` is a [`Folder`](https://pynapple-org.github.io/pynapple/reference/io/folder/) object that allows easy navigation and interaction with a dataset.
+# The object `data` is a [`Folder`](../../../reference/io/folder/) object that allows easy navigation and interaction with a dataset.
# In this case, we want to load the NWB file in the folder `/pynapplenwb`. Data are always lazy loaded. No time series is loaded until it's actually called.
# When calling the NWB file, the object `nwb` is an interface to the NWB file. All the data inside the NWB file that are compatible with one of the pynapple objects are shown with their corresponding keys.
nwb = data["sub-A2929"]["A2929-200711"]["pynapplenwb"]["A2929-200711"]
@@ -79,7 +79,7 @@
# %%
# We can individually call each object and they are actually loaded.
#
-# `units` is a [TsGroup](https://pynapple-org.github.io/pynapple/reference/core/ts_group/) object. It allows to group together time series with different timestamps and couple metainformation to each neuron. In this case, the location of where the neuron was recorded has been added when loading the session for the first time.
+# `units` is a [TsGroup](../../../reference/core/ts_group/) object. It allows to group together time series with different timestamps and couple metainformation to each neuron. In this case, the location of where the neuron was recorded has been added when loading the session for the first time.
# We load `units` as `spikes`
spikes = nwb["units"]
print(spikes)
@@ -90,7 +90,7 @@
print(neuron_0)
# %%
-# `neuron_0` is a [Ts](https://pynapple-org.github.io/pynapple/reference/core/time_series/#pynapple.core.time_series.Ts) object containing the times of the spikes.
+# `neuron_0` is a [Ts](../../../reference/core/time_series/#pynapple.core.time_series.Ts) object containing the times of the spikes.
# %%
# The other information about the session is contained in `nwb["epochs"]`. In this case, the start and end of the sleep and wake epochs. If the NWB time intervals contains tags of the epochs, pynapple will try to group them together and return a dictionary of IntervalSet instead of IntervalSet.
@@ -147,7 +147,7 @@
# ***
# Tuning curves
# -------------
-# Let's do a more advanced analysis. Neurons from ADn (group 0 in the `spikes` group object) are know to fire for a particular direction. Therefore, we can compute their tuning curves, i.e. their firing rates as a function of the head-direction of the animal in the horizontal plane (*ry*). To do this, we can use the function [`compute_1d_tuning_curves`](https://pynapple-org.github.io/pynapple/reference/process/tuning_curves/#pynapple.process.tuning_curves.compute_1d_tuning_curves). In this case, the tuning curves are computed over 120 bins and between 0 and 2$\pi$.
+# Let's do a more advanced analysis. Neurons from ADn (group 0 in the `spikes` group object) are known to fire for a particular direction. Therefore, we can compute their tuning curves, i.e. their firing rates as a function of the head-direction of the animal in the horizontal plane (*ry*). To do this, we can use the function [`compute_1d_tuning_curves`](../../../reference/process/tuning_curves/#pynapple.process.tuning_curves.compute_1d_tuning_curves). In this case, the tuning curves are computed over 120 bins and between 0 and 2$\pi$.
tuning_curves = nap.compute_1d_tuning_curves(
group=spikes, feature=head_direction, nb_bins=121, minmax=(0, 2 * np.pi)
@@ -170,7 +170,7 @@
plt.show()
# %%
-# While ADN neurons show obvious modulation for head-direction, it is not obvious for all CA1 cells. Therefore we want to restrict the remaining of the analyses to only ADN neurons. We can split the `spikes` group with the function [`getby_category`](https://pynapple-org.github.io/pynapple/reference/core/ts_group/#pynapple.core.ts_group.TsGroup.getby_category).
+# While ADN neurons show obvious modulation for head-direction, it is not obvious for all CA1 cells. Therefore we want to restrict the remaining of the analyses to only ADN neurons. We can split the `spikes` group with the function [`getby_category`](../../../reference/core/ts_group/#pynapple.core.ts_group.TsGroup.getby_category).
spikes_by_location = spikes.getby_category("location")
@@ -186,7 +186,7 @@
# ------------
# A classical question with head-direction cells is how pairs stay coordinated across brain states i.e. wake vs sleep (see Peyrache, A., Lacroix, M. M., Petersen, P. C., & Buzsáki, G. (2015). Internally organized mechanisms of the head direction sense. Nature neuroscience, 18(4), 569-575.)
#
-# In this example, this coordination across brain states will be evaluated with cross-correlograms of pairs of neurons. We can call the function [`compute_crosscorrelogram`](https://pynapple-org.github.io/pynapple/reference/process/correlograms/#pynapple.process.correlograms.compute_crosscorrelogram) during both sleep and wake epochs.
+# In this example, this coordination across brain states will be evaluated with cross-correlograms of pairs of neurons. We can call the function [`compute_crosscorrelogram`](../../../reference/process/correlograms/#pynapple.process.correlograms.compute_crosscorrelogram) during both sleep and wake epochs.
cc_wake = nap.compute_crosscorrelogram(
group=spikes_adn,
@@ -240,7 +240,7 @@
# This last analysis shows how to use the pynapple's decoding function.
#
# The previous result indicates a persistent coordination of head-direction cells during sleep. Therefore it is possible to decode a virtual head-direction signal even if the animal is not moving its head.
-# This example uses the function [`decode_1d`](https://pynapple-org.github.io/pynapple/reference/process/decoding/#pynapple.process.decoding.decode_1d) which implements bayesian decoding (see : Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. (1998). Interpreting neuronal population activity by reconstruction: unified framework with application to hippocampal place cells. Journal of neurophysiology, 79(2), 1017-1044.)
+# This example uses the function [`decode_1d`](../../../reference/process/decoding/#pynapple.process.decoding.decode_1d) which implements bayesian decoding (see : Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J. (1998). Interpreting neuronal population activity by reconstruction: unified framework with application to hippocampal place cells. Journal of neurophysiology, 79(2), 1017-1044.)
#
# First we can validate the decoding function with the real position of the head of the animal during wake.
@@ -285,7 +285,7 @@
)
# %%
-# Here we are gonna chain the TsGroup function [`set_info`](https://pynapple-org.github.io/pynapple/reference/core/ts_group/#pynapple.core.ts_group.TsGroup.set_info) and the function [`to_tsd`](https://pynapple-org.github.io/pynapple/reference/core/ts_group/#pynapple.core.ts_group.TsGroup.to_tsd) to flatten the TsGroup and quickly assign to each spikes a corresponding value found in the metadata table. Any columns of the metadata table can be assigned to timestamps in a TsGroup.
+# Here we are gonna chain the TsGroup function [`set_info`](../../../reference/core/ts_group/#pynapple.core.ts_group.TsGroup.set_info) and the function [`to_tsd`](../../../reference/core/ts_group/#pynapple.core.ts_group.TsGroup.to_tsd) to flatten the TsGroup and quickly assign to each spikes a corresponding value found in the metadata table. Any columns of the metadata table can be assigned to timestamps in a TsGroup.
#
# Here the value assign to the spikes comes from the preferred firing direction of the neurons. The following line is a quick way to sort the neurons based on their preferred firing direction
order = np.argsort(np.argmax(tuning_curves_adn.values, 0))
diff --git a/docs/api_guide/tutorial_pynapple_spectrum.py b/docs/api_guide/tutorial_pynapple_spectrum.py
index 39db2432..321792cf 100644
--- a/docs/api_guide/tutorial_pynapple_spectrum.py
+++ b/docs/api_guide/tutorial_pynapple_spectrum.py
@@ -3,7 +3,7 @@
Power spectral density
======================
-See the [documentation](https://pynapple-org.github.io/pynapple/) of Pynapple for instructions on installing the package.
+See the [documentation](/) of Pynapple for instructions on installing the package.
"""
diff --git a/docs/api_guide/tutorial_pynapple_wavelets.py b/docs/api_guide/tutorial_pynapple_wavelets.py
index 20246d0f..9c34cee9 100644
--- a/docs/api_guide/tutorial_pynapple_wavelets.py
+++ b/docs/api_guide/tutorial_pynapple_wavelets.py
@@ -10,7 +10,7 @@
The function `nap.generate_morlet_filterbank` can help parametrize and visualize the Morlet wavelets.
-See the [documentation](https://pynapple-org.github.io/pynapple/) of Pynapple for instructions on installing the package.
+See the [documentation](/) of Pynapple for instructions on installing the package.
This tutorial was made by [Kipp Freud](https://kippfreud.com/).
diff --git a/docs/examples/tutorial_HD_dataset.py b/docs/examples/tutorial_HD_dataset.py
index 5ab0ab6d..d619d329 100644
--- a/docs/examples/tutorial_HD_dataset.py
+++ b/docs/examples/tutorial_HD_dataset.py
@@ -7,7 +7,7 @@
The NWB file for the example is hosted on [OSF](https://osf.io/jb2gd). We show below how to stream it.
The entire dataset can be downloaded [here](https://dandiarchive.org/dandiset/000056).
-See the [documentation](https://pynapple-org.github.io/pynapple/) of Pynapple for instructions on installing the package.
+See the [documentation](/) of Pynapple for instructions on installing the package.
This tutorial was made by Dhruv Mehrotra and Guillaume Viejo.
diff --git a/docs/examples/tutorial_calcium_imaging.py b/docs/examples/tutorial_calcium_imaging.py
index 2df962cc..e900c64f 100644
--- a/docs/examples/tutorial_calcium_imaging.py
+++ b/docs/examples/tutorial_calcium_imaging.py
@@ -9,7 +9,7 @@
The NWB file for the example is hosted on [OSF](https://osf.io/sbnaw). We show below how to stream it.
-See the [documentation](https://pynapple-org.github.io/pynapple/) of Pynapple for instructions on installing the package.
+See the [documentation](../../../) of Pynapple for instructions on installing the package.
This tutorial was made by Sofia Skromne Carrasco and Guillaume Viejo.
diff --git a/docs/examples/tutorial_human_dataset.py b/docs/examples/tutorial_human_dataset.py
index caeb5f0e..6c78a167 100644
--- a/docs/examples/tutorial_human_dataset.py
+++ b/docs/examples/tutorial_human_dataset.py
@@ -8,7 +8,7 @@
The NWB file for the example used here is provided in [this](https://github.com/PeyracheLab/pynacollada/tree/main/pynacollada/Pynapple%20Paper%20Figures/Zheng%202022/000207/sub-4) repository. The entire dataset can be downloaded [here](https://dandiarchive.org/dandiset/000207/0.220216.0323).
-See the [documentation](https://pynapple-org.github.io/pynapple/) of Pynapple for instructions on installing the package.
+See the [documentation](/) of Pynapple for instructions on installing the package.
This tutorial was made by Dhruv Mehrotra.
diff --git a/docs/index.md b/docs/index.md
index 1953f50d..4854300a 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -44,7 +44,7 @@ pynapple is a light-weight python library for neurophysiological data analysis.
Learn the pynapple API with notebooks.
- [:octicons-arrow-right-24: API guide](https://pynapple.org/generated/api_guide/)
+ [:octicons-arrow-right-24: API guide](generated/api_guide/)
- :material-brain:{ .lg .middle} __Neural Analysis__
@@ -52,7 +52,7 @@ pynapple is a light-weight python library for neurophysiological data analysis.
Explore fully worked examples to learn how to analyze neural recordings using pynapple.
- [:octicons-arrow-right-24: Tutorials](https://pynapple.org/generated/examples/)
+ [:octicons-arrow-right-24: Tutorials](generated/examples/)
- :material-cog:{ .lg .middle } __API__
@@ -60,7 +60,7 @@ pynapple is a light-weight python library for neurophysiological data analysis.
Access a detailed description of each module and function, including parameters and functionality.
- [:octicons-arrow-right-24: Modules](https://pynapple.org/reference/)
+ [:octicons-arrow-right-24: Modules](reference/)
- :material-hammer-wrench:{ .lg .middle } __Installation Instructions__
diff --git a/docs/installation.md b/docs/installation.md
index 2a2e7611..1830141a 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -9,33 +9,39 @@ The best way to install pynapple is with pip within a new [conda](https://docs.c
``` {.sourceCode .shell}
-$ conda create --name pynapple pip python=3.8
-$ conda activate pynapple
-$ pip install pynapple
+conda create --name pynapple pip python=3.8
+conda activate pynapple
+pip install pynapple
```
or directly from the source code:
``` {.sourceCode .shell}
-$ conda create --name pynapple pip python=3.8
-$ conda activate pynapple
-$ # clone the repository
-$ git clone https://github.com/pynapple-org/pynapple.git
-$ cd pynapple
-$ # Install in editable mode with `-e` or, equivalently, `--editable`
-$ pip install -e .
+conda create --name pynapple pip python=3.8
+conda activate pynapple
+
+# clone the repository
+git clone https://github.com/pynapple-org/pynapple.git
+cd pynapple
+
+# Install in editable mode with `-e` or, equivalently, `--editable`
+pip install -e .
```
-> **Note**
-> The package is now using a pyproject.toml file for installation and dependencies management. If you want to run the tests, use pip install -e .[dev]
-
-This procedure will install all the dependencies including
-
-- pandas
-- numpy
-- scipy
-- numba
-- pynwb 2.0
-- tabulate
-- h5py
+
+# Dependencies
+
+## Supported python versions
+
+ - Python 3.8+
+
+## Mandatory dependencies
+
+ - pandas
+ - numpy
+ - scipy
+ - numba
+ - pynwb 2.0
+ - tabulate
+ - h5py
diff --git a/docs/quickstart.md b/docs/quickstart.md
index f46b34d1..93a14888 100644
--- a/docs/quickstart.md
+++ b/docs/quickstart.md
@@ -13,11 +13,11 @@ Pynapple now implements signal processing. For example, to filter a 1250 Hz samp
```python
nap.apply_bandpass_filter(signal, (10, 20), fs=1250)
```
-New functions includes power spectral density and Morlet wavelet decomposition. See the [documentation](https://pynapple-org.github.io/pynapple/reference/process/) for more details.
+New functions include power spectral density and Morlet wavelet decomposition. See the [documentation](/reference/process/) for more details.
### pynapple >= 0.6
-Starting with 0.6, [`IntervalSet`](https://pynapple-org.github.io/pynapple/reference/core/interval_set/) objects are behaving as immutable numpy ndarray. Before 0.6, you could select an interval within an `IntervalSet` object with:
+Starting with 0.6, [`IntervalSet`](/reference/core/interval_set/) objects are behaving as immutable numpy ndarray. Before 0.6, you could select an interval within an `IntervalSet` object with:
```python
new_intervalset = intervalset.loc[[0]] # Selecting first interval
@@ -35,7 +35,7 @@ Starting with 0.4, pynapple rely on the [numpy array container](https://numpy.or
This allows for a better handling of returned objects.
-Additionaly, it is now possible to define time series objects with more than 2 dimensions with `TsdTensor`. You can also look at this [notebook](https://pynapple-org.github.io/pynapple/generated/api_guide/tutorial_pynapple_numpy/) for a demonstration of numpy compatibilities.
+Additionally, it is now possible to define time series objects with more than 2 dimensions with `TsdTensor`. You can also look at this [notebook](/generated/api_guide/tutorial_pynapple_numpy/) for a demonstration of numpy compatibility.
Basic Usage
diff --git a/pynapple/core/base_class.py b/pynapple/core/base_class.py
index be08694f..d6a48118 100644
--- a/pynapple/core/base_class.py
+++ b/pynapple/core/base_class.py
@@ -15,7 +15,7 @@
from .utils import check_filename, convert_to_numpy_array
-class Base(abc.ABC):
+class _Base(abc.ABC):
"""
Abstract base class for time series and timestamps objects.
Implement most of the shared functions across concrete classes `Ts`, `Tsd`, `TsdFrame`, `TsdTensor`
diff --git a/pynapple/core/config.py b/pynapple/core/config.py
index fbc59ebb..ca25faa8 100644
--- a/pynapple/core/config.py
+++ b/pynapple/core/config.py
@@ -11,23 +11,21 @@
See the example below to update the backend. Don't forget to install [pynajax](https://github.com/pynapple-org/pynajax).
-``` py
+
import pynapple as nap
import numpy as np
nap.nap_config.set_backend("jax") # Default option is 'numba'.
-```
You can view the current backend with
-``` py
+
>>> print(nap.nap_config.backend)
'jax'
-```
## Warnings configuration
pynapple gives warnings that can be helpful to debug. For example when passing time indexes that are not sorted:
-``` py
+
>>> import pynapple as nap
>>> t = [0, 2, 1]
>>> nap.Ts(t)
@@ -38,11 +36,9 @@
1.0
2.0
shape: 3
-```
pynapple's warnings can be suppressed :
-``` py
>>> nap.nap_config.suppress_time_index_sorting_warnings = True
>>> nap.Ts(t=t)
Time (s)
@@ -50,8 +46,6 @@
1.0
2.0
shape: 3
-```
-
"""
import importlib.util
diff --git a/pynapple/core/interval_set.py b/pynapple/core/interval_set.py
index b87c04dd..86213bfd 100644
--- a/pynapple/core/interval_set.py
+++ b/pynapple/core/interval_set.py
@@ -1,42 +1,5 @@
"""
- The class `IntervalSet` deals with non-overlaping epochs. `IntervalSet` objects can interact with each other or with the time series objects.
-
- The `IntervalSet` object behaves like a numpy ndarray with the limitation that the object is not mutable.
-
- You can still apply any numpy array function to it :
-
- ``` py
- >>> import pynapple as nap
- >>> import numpy as np
- >>> ep = nap.IntervalSet(start=[0, 10], end=[5,20])
- start end
- 0 0 5
- 1 10 20
- shape: (1, 2)
- >>> np.diff(ep, 1)
- UserWarning: Converting IntervalSet to numpy.array
- array([[ 5.],
- [10.]])
- ```
-
- You can slice :
-
- ``` py
- >>> ep[:,0]
- array([ 0., 10.])
- >>> ep[0]
- start end
- 0 0 5
- shape: (1, 2)
- ```
-
- But modifying the `IntervalSet` with raise an error:
-
- ``` py
- >>> ep[0,0] = 1
- RuntimeError: IntervalSet is immutable. Starts and ends have been already sorted.
- ```
-
+The class `IntervalSet` deals with non-overlapping epochs. `IntervalSet` objects can interact with each other or with the time series objects.
"""
import importlib
@@ -79,6 +42,42 @@
class IntervalSet(NDArrayOperatorsMixin, _MetadataMixin):
"""
A class representing a (irregular) set of time intervals in elapsed time, with relative operations
+
+ The `IntervalSet` object behaves like a numpy ndarray with the limitation that the object is not mutable.
+
+ You can still apply any numpy array function to it :
+
+ >>> import pynapple as nap
+ >>> import numpy as np
+ >>> ep = nap.IntervalSet(start=[0, 10], end=[5,20])
+ start end
+ 0 0 5
+ 1 10 20
+ shape: (1, 2)
+
+ >>> np.diff(ep, 1)
+ UserWarning: Converting IntervalSet to numpy.array
+ array([[ 5.],
+ [10.]])
+
+
+ You can slice :
+
+ >>> ep[:,0]
+ array([ 0., 10.])
+
+ >>> ep[0]
+ start end
+ 0 0 5
+ shape: (1, 2)
+
+
+    But modifying the `IntervalSet` will raise an error:
+
+
+ >>> ep[0,0] = 1
+ RuntimeError: IntervalSet is immutable. Starts and ends have been already sorted.
+
"""
def __init__(
@@ -89,27 +88,26 @@ def __init__(
metadata=None,
):
"""
- IntervalSet initializer
-
- If start and end and not aligned, meaning that \n
+ If start and end are not aligned, meaning that:
1. len(start) != len(end)
2. end[i] > start[i]
3. start[i+1] > end[i]
4. start and end are not sorted,
- IntervalSet will try to "fix" the data by eliminating some of the start and end data point
+ IntervalSet will try to "fix" the data by eliminating some of the start and end data points.
Parameters
----------
start : numpy.ndarray or number or pandas.DataFrame or pandas.Series or iterable of (start, end) pairs
Beginning of intervals. Alternatively, the `end` argument can be left out and `start` can be one of the
following:
- - IntervalSet
- - pandas.DataFrame with columns ["start", "end"]
- - iterable of (start, end) pairs
- - a single (start, end) pair
+
+ - IntervalSet
+ - pandas.DataFrame with columns ["start", "end"]
+ - iterable of (start, end) pairs
+ - a single (start, end) pair
end : numpy.ndarray or number or pandas.Series, optional
- Ends of intervals
+ Ends of intervals.
time_units : str, optional
Time unit of the intervals ('us', 'ms', 's' [default])
metadata: pd.DataFrame or dict, optional
@@ -118,7 +116,7 @@ def __init__(
Raises
------
RuntimeError
- If `start` and `end` arguments are of unknown type
+ If `start` and `end` arguments are of unknown type.
"""
# set directly in __dict__ to avoid infinite recursion in __setattr__
diff --git a/pynapple/core/time_series.py b/pynapple/core/time_series.py
index dbbad475..49c6cd67 100644
--- a/pynapple/core/time_series.py
+++ b/pynapple/core/time_series.py
@@ -27,7 +27,7 @@
from tabulate import tabulate
from ._core_functions import _bin_average, _convolve, _dropna, _restrict, _threshold
-from .base_class import Base
+from .base_class import _Base
from .interval_set import IntervalSet
from .metadata_class import _MetadataMixin
from .time_index import TsIndex
@@ -62,7 +62,7 @@ def _get_class(data):
return TsdTensor
-class BaseTsd(Base, NDArrayOperatorsMixin, abc.ABC):
+class _BaseTsd(_Base, NDArrayOperatorsMixin, abc.ABC):
"""
Abstract base class for time series objects.
Implement most of the shared functions across concrete classes `Tsd`, `TsdFrame`, `TsdTensor`
@@ -311,7 +311,7 @@ def value_from(self, data, ep=None):
52 52
"""
assert isinstance(
- data, BaseTsd
+ data, _BaseTsd
), "First argument should be an instance of Tsd, TsdFrame or TsdTensor"
t, d, time_support, kwargs = super().value_from(data, ep)
@@ -646,7 +646,7 @@ def interpolate(self, ts, ep=None, left=None, right=None):
right : None, optional
Value to return for ts > tsd[-1], default is tsd[-1].
"""
- if not isinstance(ts, Base):
+ if not isinstance(ts, _Base):
raise IOError(
"First argument should be an instance of Ts, Tsd, TsdFrame or TsdTensor"
)
@@ -707,9 +707,9 @@ def interpolate(self, ts, ep=None, left=None, right=None):
return self.__class__(t=new_t, d=new_d, **kwargs_dict)
-class TsdTensor(BaseTsd):
+class TsdTensor(_BaseTsd):
"""
- TsdTensor
+ Container for neurophysiological time series with more than 2 dimensions (movies).
Attributes
----------
@@ -776,16 +776,16 @@ def create_str(array):
if self.shape[0] > max_rows:
n_rows = max_rows // 2
for i, array in zip(self.index[0:n_rows], self.values[0:n_rows]):
- _str_.append([i.__repr__(), create_str(array)])
+ _str_.append([i, create_str(array)])
_str_.append(["...", ""])
for i, array in zip(
self.index[-n_rows:],
self.values[self.values.shape[0] - n_rows : self.values.shape[0]],
):
- _str_.append([i.__repr__(), create_str(array)])
+ _str_.append([i, create_str(array)])
else:
for i, array in zip(self.index, self.values):
- _str_.append([i.__repr__(), create_str(array)])
+ _str_.append([i, create_str(array)])
return tabulate(_str_, headers=headers, colalign=("left",)) + "\n" + bottom
@@ -866,9 +866,9 @@ def save(self, filename):
return
-class TsdFrame(BaseTsd, _MetadataMixin):
+class TsdFrame(_BaseTsd, _MetadataMixin):
"""
- TsdFrame
+ Column-based container for neurophysiological time series.
Attributes
----------
@@ -1235,9 +1235,9 @@ def save(self, filename):
return
-class Tsd(BaseTsd):
+class Tsd(_BaseTsd):
"""
- A container around numpy.ndarray specialized for neurophysiology time series.
+ 1-dimensional container for neurophysiological time series.
Tsd provides standardized time representation, plus various functions for manipulating times series.
@@ -1540,9 +1540,9 @@ def save(self, filename):
return
-class Ts(Base):
+class Ts(_Base):
"""
- Timestamps only object for a time series with only time index,
+ Timestamps only object for a time series with only time index.
Attributes
----------
@@ -1589,12 +1589,12 @@ def __repr__(self):
if len(self) > max_rows:
n_rows = max_rows // 2
_str_ = "\n".join(
- [i.__repr__() for i in self.index[0:n_rows]]
+ [str(i) for i in self.index[0:n_rows]]
+ ["..."]
- + [i.__repr__() for i in self.index[-n_rows:]]
+ + [str(i) for i in self.index[-n_rows:]]
)
else:
- _str_ = "\n".join([i.__repr__() for i in self.index])
+ _str_ = "\n".join([str(i) for i in self.index])
bottom = "shape: {}".format(len(self.index))
return "\n".join((upper, _str_, bottom))
@@ -1681,7 +1681,7 @@ def value_from(self, data, ep=None):
52 52
"""
assert isinstance(
- data, BaseTsd
+ data, _BaseTsd
), "First argument should be an instance of Tsd, TsdFrame or TsdTensor"
t, d, time_support, kwargs = super().value_from(data, ep)
diff --git a/pynapple/core/ts_group.py b/pynapple/core/ts_group.py
index 342ee6ad..ba4b55cb 100644
--- a/pynapple/core/ts_group.py
+++ b/pynapple/core/ts_group.py
@@ -1,6 +1,7 @@
"""
- The class `TsGroup` helps group objects with different timestamps (i.e. timestamps of spikes of a population of neurons).
+The class `TsGroup` helps group objects with different timestamps
+(i.e. timestamps of spikes of a population of neurons).
"""
@@ -15,12 +16,12 @@
from ._core_functions import _count
from ._jitted_functions import jitunion, jitunion_isets
-from .base_class import Base
+from .base_class import _Base
from .config import nap_config
from .interval_set import IntervalSet
from .metadata_class import _MetadataMixin
from .time_index import TsIndex
-from .time_series import BaseTsd, Ts, Tsd, TsdFrame, is_array_like
+from .time_series import Ts, Tsd, TsdFrame, _BaseTsd, is_array_like
from .utils import _get_terminal_size, check_filename, convert_to_numpy_array
@@ -58,7 +59,7 @@ def _union_intervals(i_sets):
class TsGroup(UserDict, _MetadataMixin):
"""
- The TsGroup is a dictionary-like object to hold multiple [`Ts`][pynapple.core.time_series.Ts] or [`Tsd`][pynapple.core.time_series.Tsd] objects with different time index.
+ Dictionary-like object to group objects with different timestamps (for example timestamps of spikes of a population of neurons).
Attributes
----------
@@ -156,7 +157,7 @@ def __init__(
# Transform elements to Ts/Tsd objects
for k in self.index:
- if not isinstance(data[k], Base):
+ if not isinstance(data[k], _Base):
if isinstance(data[k], list) or is_array_like(data[k]):
warnings.warn(
"Elements should not be passed as {}. Default time units is seconds when creating the Ts object.".format(
@@ -783,6 +784,7 @@ def getby_threshold(self, key, thr, op=">"):
2 4
This exemple shows how to get a new TsGroup with all elements for which the metainfo frequency is above 1.
+
>>> newtsgroup = tsgroup.getby_threshold('freq', 1, op = '>')
Index Freq. (Hz)
------- ------------
@@ -823,6 +825,7 @@ def getby_intervals(self, key, bins):
Examples
--------
+
>>> import pynapple as nap
>>> import numpy as np
>>> tmp = { 0:nap.Ts(t=np.arange(0,200), time_units='s'),
@@ -837,6 +840,7 @@ def getby_intervals(self, key, bins):
2 4 2
This exemple shows how to bin the TsGroup according to one metainfo key.
+
>>> newtsgroup, bincenter = tsgroup.getby_intervals('alpha', [0, 1, 2])
>>> newtsgroup
[ Index Freq. (Hz) alpha
@@ -847,8 +851,10 @@ def getby_intervals(self, key, bins):
1 2 1]
By default, the function returns the center of the bins.
+
>>> bincenter
array([0.5, 1.5])
+
"""
idx = np.digitize(self._metadata[key], bins) - 1
groups = self._metadata.index.groupby(idx)
@@ -875,6 +881,7 @@ def getby_category(self, key):
Examples
--------
+
>>> import pynapple as nap
>>> import numpy as np
>>> tmp = { 0:nap.Ts(t=np.arange(0,200), time_units='s'),
@@ -889,6 +896,7 @@ def getby_category(self, key):
2 4 1
This exemple shows how to group the TsGroup according to one metainfo key.
+
>>> newtsgroup = tsgroup.getby_category('group')
>>> newtsgroup
{0: Index Freq. (Hz) group
@@ -898,6 +906,7 @@ def getby_category(self, key):
------- ------------ -------
1 2 1
2 4 1}
+
"""
groups = self._metadata.groupby(key).groups
sliced = {k: self[list(groups[k])] for k in groups.keys()}
@@ -1119,17 +1128,14 @@ def save(self, filename):
and assigning to each the corresponding index. Typically, a TsGroup like
this :
- ``` py
- TsGroup({
+ >>> TsGroup({
0 : Tsd(t=[0, 2, 4], d=[1, 2, 3])
- 1 : Tsd(t=[1, 5], d=[5, 6])
- })
- ```
+ 1 : Tsd(t=[1, 5], d=[5, 6])})
will be saved as npz with the following keys:
- ``` py
- {
+
+ >>> {
't' : [0, 1, 2, 4, 5],
'd' : [1, 5, 2, 3, 5],
'index' : [0, 1, 0, 0, 1],
@@ -1138,7 +1144,6 @@ def save(self, filename):
'keys' : [0, 1],
'type' : 'TsGroup'
}
- ```
Metadata are saved by columns with the column name as the npz key. To avoid
potential conflicts, make sure the columns name of the metadata are different
@@ -1211,7 +1216,7 @@ def save(self, filename):
for n in self.index:
kl = len(self[n])
times[k : k + kl] = self[n].index
- if isinstance(self[n], BaseTsd):
+ if isinstance(self[n], _BaseTsd):
data[k : k + kl] = self[n].values
index[k : k + kl] = int(n)
k += kl
diff --git a/pynapple/io/folder.py b/pynapple/io/folder.py
index a35af18d..deb7b2fc 100644
--- a/pynapple/io/folder.py
+++ b/pynapple/io/folder.py
@@ -73,7 +73,7 @@ def _walk_folder(tree, folder):
class Folder(UserDict):
"""
- Base class for all type of folders (i.e. Project, Subject, Sessions, ...).
+    Dictionary-like object to walk and loop through nested folders.
Handles files and sub-folders discovery
Attributes
diff --git a/pynapple/io/interface_npz.py b/pynapple/io/interface_npz.py
index cedb779b..4f9e7ca6 100644
--- a/pynapple/io/interface_npz.py
+++ b/pynapple/io/interface_npz.py
@@ -1,12 +1,3 @@
-#!/usr/bin/env python
-
-# -*- coding: utf-8 -*-
-# @Author: Guillaume Viejo
-# @Date: 2023-07-05 16:03:25
-# @Last Modified by: Guillaume Viejo
-# @Last Modified time: 2024-08-02 11:16:07
-
-
from pathlib import Path
import numpy as np
@@ -44,7 +35,7 @@ def _find_class_from_variables(file_variables, data_ndims=None):
class NPZFile(object):
- """Class that points to a NPZ file that can be loaded as a pynapple object.
+ """Class to read/write NPZ files as a pynapple object.
Objects have a save function in npz format as well as the Folder class.
Examples
diff --git a/pynapple/process/correlograms.py b/pynapple/process/correlograms.py
index 73f34b0e..c3e0f30f 100644
--- a/pynapple/process/correlograms.py
+++ b/pynapple/process/correlograms.py
@@ -1,13 +1,5 @@
"""
-This module holds the functions to compute discrete cross-correlogram
-for timestamps data (i.e. spike times).
-
-| Function | Description |
-|------|------|
-| `nap.compute_autocorrelogram` | Autocorrelograms from a TsGroup object |
-| `nap.compute_crosscorrelogram` | Crosscorrelogram from a TsGroup object |
-| `nap.compute_eventcorrelogram` | Crosscorrelogram between a TsGroup object and a Ts object |
-
+Functions to compute correlograms of timestamps data.
"""
import inspect
diff --git a/pynapple/process/decoding.py b/pynapple/process/decoding.py
index de031455..13900d3e 100644
--- a/pynapple/process/decoding.py
+++ b/pynapple/process/decoding.py
@@ -1,10 +1,5 @@
-# -*- coding: utf-8 -*-
-# @Author: gviejo
-# @Date: 2022-01-02 23:34:48
-# @Last Modified by: Guillaume Viejo
-# @Last Modified time: 2023-09-22 12:06:13
-
"""
+Decoding functions for timestamps data (spike times). The first argument is always a tuning curves object.
"""
import numpy as np
@@ -14,7 +9,7 @@
def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None):
"""
- Performs Bayesian decoding over a one dimensional feature.
+ Perform Bayesian decoding over a one dimensional feature.
See:
Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J.
(1998). Interpreting neuronal population activity by
@@ -115,7 +110,8 @@ def decode_1d(tuning_curves, group, ep, bin_size, time_units="s", feature=None):
def decode_2d(tuning_curves, group, ep, bin_size, xy, time_units="s", features=None):
"""
- Performs Bayesian decoding over a two dimensional feature.
+    Perform Bayesian decoding over two dimensional features.
+
See:
Zhang, K., Ginzburg, I., McNaughton, B. L., & Sejnowski, T. J.
(1998). Interpreting neuronal population activity by
diff --git a/pynapple/process/filtering.py b/pynapple/process/filtering.py
index dc13f967..01303e02 100644
--- a/pynapple/process/filtering.py
+++ b/pynapple/process/filtering.py
@@ -1,4 +1,4 @@
-"""Filtering module."""
+"""Functions for highpass, lowpass, bandpass or bandstop filtering."""
import inspect
from collections.abc import Iterable
@@ -200,7 +200,7 @@ def _compute_filter(
"""
Filter the signal.
"""
- if not isinstance(data, nap.time_series.BaseTsd):
+ if not isinstance(data, nap.time_series._BaseTsd):
raise ValueError(
f"Invalid value: {data}. First argument should be of type Tsd, TsdFrame or TsdTensor"
)
diff --git a/pynapple/process/perievent.py b/pynapple/process/perievent.py
index a3dbb5d1..2af7a731 100644
--- a/pynapple/process/perievent.py
+++ b/pynapple/process/perievent.py
@@ -1,4 +1,4 @@
-"""Perievent functions
+"""Functions to realign time series relative to a reference time.
"""
diff --git a/pynapple/process/randomize.py b/pynapple/process/randomize.py
index 7bbf577d..a3c19fca 100644
--- a/pynapple/process/randomize.py
+++ b/pynapple/process/randomize.py
@@ -1,9 +1,11 @@
+"""
+Functions to shuffle timestamps to create surrogate datasets.
+"""
+
import numpy as np
from .. import core as nap
-# Random shift
-
def shift_timestamps(ts, min_shift=0.0, max_shift=None):
"""
diff --git a/pynapple/process/spectrum.py b/pynapple/process/spectrum.py
index 085ff8fa..cddeb4ec 100644
--- a/pynapple/process/spectrum.py
+++ b/pynapple/process/spectrum.py
@@ -1,11 +1,5 @@
"""
-This module contains functions to compute power spectral density and mean power spectral density.
-
-| Function | Description |
-|------|------|
-| `compute_power_spectral_density` | Compute Power Spectral Density over a single epoch |
-| `compute_mean_power_spectral_density` | Compute Mean Power Spectral Density over multiple epochs |
-
+Functions to compute power spectral density and mean power spectral density.
"""
import inspect
@@ -56,6 +50,7 @@ def compute_power_spectral_density(
sig, fs=None, ep=None, full_range=False, norm=False, n=None
):
"""
+ Compute Power Spectral Density over a single epoch.
Perform numpy fft on sig, returns output assuming a constant sampling rate for the signal.
Parameters
@@ -121,7 +116,7 @@ def compute_mean_power_spectral_density(
time_unit="s",
):
"""
- Compute mean power spectral density by averaging FFT over epochs of same size.
+ Compute Mean Power Spectral Density over multiple epochs of same size.
The parameter `interval_size` controls the duration of the epochs.
diff --git a/pynapple/process/tuning_curves.py b/pynapple/process/tuning_curves.py
index 254e6e24..5df32094 100644
--- a/pynapple/process/tuning_curves.py
+++ b/pynapple/process/tuning_curves.py
@@ -1,16 +1,5 @@
-# -*- coding: utf-8 -*-
"""
-You can compute tuning curves for features in 1 dimension or 2 dimension.
-
-| Function | Description |
-|------|------|
-| `nap.compute_discrete_tuning_curves` | Firing rate from a dictionnary of IntervalSet|
-| `compute_1d_tuning_curves` | Firing rate as a function of a 1-d feature |
-| `compute_2d_tuning_curves` | Firing rate as a function of a 2-d features |
-| `compute_1d_tuning_curves_continuous` | Mean value as a function of a 1-d feature |
-| `compute_2d_tuning_curves_continuous` | Mean value as a function of a 2-d features |
-| `compute_1d_mutual_info` | Mutual information of a tuning curve computed from a 1-d feature. |
-| `compute_2d_mutual_info` | Mutual information of a tuning curve computed from a 2-d features. |
+Functions to compute tuning curves for features in 1 dimension or 2 dimension.
"""
@@ -288,7 +277,9 @@ def compute_2d_tuning_curves(group, features, nb_bins, ep=None, minmax=None):
@_validate_tuning_inputs
def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False):
"""
- Mutual information as defined in
+ Mutual information of a tuning curve computed from a 1-d feature.
+
+ See:
Skaggs, W. E., McNaughton, B. L., & Gothard, K. M. (1993).
An information-theoretic approach to deciphering the hippocampal code.
@@ -355,7 +346,9 @@ def compute_1d_mutual_info(tc, feature, ep=None, minmax=None, bitssec=False):
@_validate_tuning_inputs
def compute_2d_mutual_info(dict_tc, features, ep=None, minmax=None, bitssec=False):
"""
- Mutual information as defined in
+ Mutual information of a tuning curve computed from 2-d features.
+
+ See:
Skaggs, W. E., McNaughton, B. L., & Gothard, K. M. (1993).
An information-theoretic approach to deciphering the hippocampal code.
diff --git a/pynapple/process/wavelets.py b/pynapple/process/wavelets.py
index e8aa601c..8a44370b 100644
--- a/pynapple/process/wavelets.py
+++ b/pynapple/process/wavelets.py
@@ -1,5 +1,5 @@
"""
-# Wavelets decomposition
+Functions to compute wavelets decomposition of a time series.
The main function for doing wavelet decomposition is `nap.compute_wavelet_transform`
diff --git a/pyproject.toml b/pyproject.toml
index 346ec821..00e0a8f0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -55,6 +55,26 @@ dev = [
"flake8", # Code linter
"coverage", # Test coverage measurement
]
+doc = [
+ "matplotlib",
+ "zarr",
+ "seaborn",
+ "tqdm",
+ "numpydoc",
+ "sphinx",
+ "pydata-sphinx-theme",
+ "sphinx-autodoc-typehints",
+ "sphinx-copybutton",
+ "sphinx-design",
+ "sphinx-issues",
+ "sphinxcontrib-apidoc",
+ "myst-parser",
+ "myst-nb",
+ "dandi",
+ "sphinx-autobuild",
+ "sphinx-contributors"
+ # "sphinx-exec-code"
+]
docs = [
"mkdocs", # Documentation generator
"mkdocstrings[python]", # Python-specific plugin for mkdocs
diff --git a/tests/test_abstract_tsd.py b/tests/test_abstract_tsd.py
index fdde8c0e..7c275d79 100644
--- a/tests/test_abstract_tsd.py
+++ b/tests/test_abstract_tsd.py
@@ -2,12 +2,12 @@
import numpy as np
import pandas as pd
import pytest
-from pynapple.core.time_series import BaseTsd
-from pynapple.core.base_class import Base
+from pynapple.core.time_series import _BaseTsd
+from pynapple.core.base_class import _Base
from pynapple.core.time_index import TsIndex
-class MyClass(BaseTsd):
+class MyClass(_BaseTsd):
def __getitem__(self, key):
return key
@@ -21,7 +21,7 @@ def __str__(self):
def __repr__(self):
return "In repr"
-class MyClass2(Base):
+class MyClass2(_Base):
def __getitem__(self, key):
return key
diff --git a/tests/test_time_series.py b/tests/test_time_series.py
index 44bd546c..ac8956c3 100755
--- a/tests/test_time_series.py
+++ b/tests/test_time_series.py
@@ -267,7 +267,7 @@ def test_properties():
def test_base_tsd_class():
- class DummyTsd(nap.core.time_series.BaseTsd):
+ class DummyTsd(nap.core.time_series._BaseTsd):
def __init__(self, t, d):
super().__init__(t, d)