From a837db2ccae3f68065e062886e1f5c3823bde6d8 Mon Sep 17 00:00:00 2001
From: Chris Sewell
Date: Sat, 9 Nov 2019 16:58:52 +0000
Subject: [PATCH 01/15] update to vs code settings

---
 .vscode/settings.json | 46 +++++++++++++++++++++----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/.vscode/settings.json b/.vscode/settings.json
index 010f705..b2a0602 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -39,33 +39,33 @@
         "placeholders",
         "plugins"
     ],
-    "todo-tree.tags": [
-        "TODO",
-        "todo::",
-        "FIXME",
-        "NOTE"
     ],
-    "todo-tree.regex": "((//|#-?|
>**Attention**:
@@ -28,7 +29,7 @@ For an example of the potential input/output, see:
 Or, for a practical example of the ipypublish capability, see these documents on Atomic 3D Visualisation:
 [Notebook](https://github.com/chrisjsewell/chrisjsewell.github.io/blob/master/3d_atomic/3D%20Atomic%20Visualisation.ipynb),
 [PDF](https://chrisjsewell.github.io/3d_atomic/converted/3D%20Atomic%20Visualisation.view_pdf.html),
-[HTML](https://chrisjsewell.github.io/3d_atomic/converted/3D%20Atomic%20Visualisation.html) or 
+[HTML](https://chrisjsewell.github.io/3d_atomic/converted/3D%20Atomic%20Visualisation.html) or
 [Reveal.JS slideshow](https://chrisjsewell.github.io/3d_atomic/converted/3D%20Atomic%20Visualisation.slides.html).
 
 ## Design Philosophy
diff --git a/bandit.yml b/bandit.yml
index 11d2e06..e96b97a 100644
--- a/bandit.yml
+++ b/bandit.yml
@@ -1 +1 @@
-skips: ['B101'] # TODO this should only skip in test files, see https://github.com/PyCQA/bandit/issues/346
\ No newline at end of file
+skips: ['B101'] # TODO this should only skip in test files, see https://github.com/PyCQA/bandit/issues/346
diff --git a/conda_dev_env.yaml b/conda_dev_env.yaml
index 3227323..5c52c1e 100644
--- a/conda_dev_env.yaml
+++ b/conda_dev_env.yaml
@@ -24,7 +24,7 @@ dependencies:
 - ruamel.yaml
 - setuptools
 - six
-- sphinx >=1.6
+- sphinx >=1.8
 - sphinxcontrib-bibtex
 - texsoup <0.2
 - tornado
@@ -41,7 +41,7 @@ dependencies:
 - flake8 <3.8.0,>=3.7.0
 - rope
 - pre_commit =1.14.4
-- yapf =0.26.0
+- black =19.3b0
 # docs
 - sphinx_rtd_theme
 - jupyter
diff --git a/conftest.py b/conftest.py
index 1ece6b4..970578a 100644
--- a/conftest.py
+++ b/conftest.py
@@ -1 +1 @@
-pytest_plugins = 'sphinx.testing.fixtures'
+pytest_plugins = "sphinx.testing.fixtures"
diff --git a/converted/.gitignore b/converted/.gitignore
index 08924fd..d918ff6 100644
--- a/converted/.gitignore
+++ b/converted/.gitignore
@@ -250,5 +250,4 @@ TSWLatexianTemp*
 
 # added by CJS
 .DS_Store
-*.frm
-
+*.frm
diff --git a/converted/Example.html b/converted/Example.html
index 631083e..28e086e 100644
--- a/converted/Example.html
+++ b/converted/Example.html
@@ -21619,4 +21619,4 @@
 
 Embed interactive HTML (like i
-
\ No newline at end of file
+
diff --git a/converted/Example.slides.html b/converted/Example.slides.html
index 49429d2..df35dcf 100644
--- a/converted/Example.slides.html
+++ b/converted/Example.slides.html
@@ -21711,4 +21711,4 @@
 
6. Embed interactive HTML ( - \ No newline at end of file + diff --git a/converted/Example.tex b/converted/Example.tex index 6e5a958..691ccdb 100644 --- a/converted/Example.tex +++ b/converted/Example.tex @@ -6,14 +6,14 @@ captions=tableheading,numbers=noendperiod]{scrartcl} %\usepackage{polyglossia} %\setmainlanguage{british} -%\DeclareTextCommandDefault{\nobreakspace}{\leavevmode\nobreak\ } +%\DeclareTextCommandDefault{\nobreakspace}{\leavevmode\nobreak\ } \usepackage[british]{babel} \usepackage[T1]{fontenc} % Nicer default font (+ math font) than Computer Modern for most use cases \usepackage{mathpazo} \usepackage{graphicx} - \usepackage[skip=3pt]{caption} - \usepackage{adjustbox} % Used to constrain images to a maximum size + \usepackage[skip=3pt]{caption} + \usepackage{adjustbox} % Used to constrain images to a maximum size \usepackage[table]{xcolor} % Allow colors to be defined \usepackage{enumerate} % Needed for markdown enumerations to work \usepackage{amsmath} % Equations @@ -28,8 +28,8 @@ \usepackage[mathletters]{ucs} % Extended unicode (utf-8) support \usepackage[utf8x]{inputenc} % Allow utf-8 characters in the tex document \usepackage{fancyvrb} % verbatim replacement that allows latex - \usepackage{grffile} % extends the file name processing of package graphics - % to support a larger range + \usepackage{grffile} % extends the file name processing of package graphics + % to support a larger range % The hyperref package gives us a pdf with properly built % internal navigation ('pdf bookmarks' for the table of contents, % internal cross-reference links, web links for URLs, etc.) @@ -57,7 +57,7 @@ % bibliography formatting \usepackage[numbers, square, super, sort&compress]{natbib} % hyperlink doi's - \usepackage{doi} + \usepackage{doi} % define a code float \usepackage{newfloat} % to define a new float types @@ -259,7 +259,7 @@ \DeclareTranslation{French}{List of Codes}{Liste des Codes} \DeclareTranslation{Italian}{List of Codes}{Elenco dei Codici} \DeclareTranslation{Dutch}{List of Codes}{Lijst van Codes} -\DeclareTranslation{Portuges}{List of Codes}{Lista de C\'{o}digos} +\DeclareTranslation{Portuges}{List of Codes}{Lista de C\'{o}digos} \DeclareTranslationFallback{Supervisors}{Supervisors} \DeclareTranslation{Catalan}{Supervisors}{Supervisors} @@ -269,7 +269,7 @@ \DeclareTranslation{French}{Supervisors}{Superviseurs} \DeclareTranslation{Italian}{Supervisors}{Le autorit\`{a} di vigilanza} \DeclareTranslation{Dutch}{Supervisors}{supervisors} -\DeclareTranslation{Portuguese}{Supervisors}{Supervisores} +\DeclareTranslation{Portuguese}{Supervisors}{Supervisores} \definecolor{codegreen}{rgb}{0,0.6,0} \definecolor{codegray}{rgb}{0.5,0.5,0.5} @@ -282,20 +282,20 @@ numberstyle=\tiny\color{codegray}, stringstyle=\color{codepurple}, basicstyle=\ttfamily, - breakatwhitespace=false, - keepspaces=true, - numbers=left, - numbersep=10pt, - showspaces=false, + breakatwhitespace=false, + keepspaces=true, + numbers=left, + numbersep=10pt, + showspaces=false, showstringspaces=false, - showtabs=false, + showtabs=false, tabsize=2, breaklines=true, literate={\-}{}{0\discretionary{-}{}{-}}, postbreak=\mbox{\textcolor{red}{$\hookrightarrow$}\space}, } -\lstset{style=mystyle} +\lstset{style=mystyle} \surroundwithmdframed[ hidealllines=true, @@ -309,13 +309,13 @@ \usepackage{geometry} \geometry{tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in, nohead,includefoot,footskip=25pt} -% you can use showframe option to check the margins visually +% you can use showframe option to check the margins visually % 
ensure new section starts on new page \addtokomafont{section}{\clearpage} % Prevent overflowing lines due to hard-to-break entities - \sloppy + \sloppy % Setup hyperref package \hypersetup{ @@ -358,7 +358,7 @@ % align captions to left (indented) \captionsetup{justification=raggedright, - singlelinecheck=false,format=hang,labelfont={it,bf}} + singlelinecheck=false,format=hang,labelfont={it,bf}} % shift footer down so space between separation line \ModifyLayer[addvoffset=.6ex]{scrheadings.foot.odd} @@ -405,8 +405,8 @@ \vspace{1.5cm} - \begin{minipage}{0.8\textwidth} - \begin{center} + \begin{minipage}{0.8\textwidth} + \begin{center} \begin{minipage}{0.39\textwidth} \begin{flushleft} \Large \emph{\GetTranslation{Author}:}\\Authors Name\\\href{mailto:authors@email.com}{authors@email.com} @@ -419,14 +419,14 @@ Second Supervisor \end{flushright} \end{minipage} - \end{center} + \end{center} \end{minipage} \vfill \begin{minipage}{0.8\textwidth} \begin{center}\LARGE{A tagline for the report.} - \end{center} + \end{center} \end{minipage} \vspace{0.8cm} @@ -524,7 +524,7 @@ \subsection{Displaying a plot with its \begin{codecell} \caption{The plotting code for a matplotlib figure (\cref{fig:example_mpl}).}\label{code:example_mpl}\begin{lstlisting}[language=Python,numbers=left,xleftmargin=20pt,xrightmargin=5pt,belowskip=5pt,aboveskip=5pt] -plt.scatter(np.random.rand(10), np.random.rand(10), +plt.scatter(np.random.rand(10), np.random.rand(10), label='data label') plt.ylabel(r'a y label with latex $\alpha$') plt.legend(); @@ -565,8 +565,8 @@ \section{Tables (with pandas)}\label{tables-with-pandas} \section{Equations (with ipython or sympy)}\label{equations-with-ipython-or-sympy} - \begin{equation}\label{eqn:example_ipy} - a = b+c + \begin{equation}\label{eqn:example_ipy} + a = b+c \end{equation} \begin{codecell}[H] @@ -578,7 +578,7 @@ \section{Equations (with ipython or sym.rsolve(f,y(n),[1,4]) \end{lstlisting}\end{codecell} - \begin{equation}\label{eqn:example_sympy} + \begin{equation}\label{eqn:example_sympy} \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 i}{5} \sqrt{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 i}{5} \sqrt{5}\right) \end{equation} @@ -593,4 +593,3 @@ \section{Embed interactive HTML (like \bibliography{Example_files/example.bib} \end{document} - diff --git a/converted/Example_files/example.bib b/converted/Example_files/example.bib index 50e638d..0bc65c4 100644 --- a/converted/Example_files/example.bib +++ b/converted/Example_files/example.bib @@ -34,5 +34,3 @@ @article{zelenyak_molecular_2016 pages = {400--405}, file = {Zelenyak et al_2016_Molecular dynamics study of perovskite structures with modified interatomic.pdf:/Users/cjs14/Library/Application Support/Firefox/Profiles/gignsb3n.default/zotero/storage/H5NVC2I5/Zelenyak et al_2016_Molecular dynamics study of perovskite structures with modified interatomic.pdf:application/pdf} } - - diff --git a/docs/.gitignore b/docs/.gitignore index 3dbe5de..e306c3f 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1,4 +1,4 @@ # source/releases.* source/api/ source/*_nbfiles/ -source/converted/ \ No newline at end of file +source/converted/ diff --git a/docs/get_intersphinx_inv.py b/docs/get_intersphinx_inv.py index 214f1f6..7f835ee 100644 --- a/docs/get_intersphinx_inv.py +++ b/docs/get_intersphinx_inv.py @@ -3,23 +3,25 @@ def fetch_inventory(uri): """Read a Sphinx inventory file into a dictionary.""" + class MockConfig(object): intersphinx_timeout = None # type: int tls_verify = False class 
MockApp(object): - srcdir = '' + srcdir = "" config = MockConfig() def warn(self, msg): warnings.warn(msg) - return intersphinx.fetch_inventory(MockApp(), '', uri) + return intersphinx.fetch_inventory(MockApp(), "", uri) if __name__ == "__main__": from sphinx.ext import intersphinx import warnings + # uri = 'http://jinja.pocoo.org/docs/dev/objects.inv' # uri = "http://nbconvert.readthedocs.io/en/latest/objects.inv" # uri = "http://nbformat.readthedocs.io/en/latest/objects.inv" @@ -28,9 +30,9 @@ def warn(self, msg): # uri = "https://networkx.github.io/documentation/stable/objects.inv" # uri = "http://docs.scipy.org/doc/scipy/reference/objects.inv" # uri = "http://pillow.readthedocs.org/en/latest/objects.inv" - uri = 'http://www.sphinx-doc.org/en/latest/objects.inv' + uri = "http://www.sphinx-doc.org/en/latest/objects.inv" # Read inventory into a dictionary inv = fetch_inventory(uri) # Or just print it - intersphinx.debug(['', uri]) + intersphinx.debug(["", uri]) diff --git a/docs/requirements.txt b/docs/requirements.txt index ea53355..f19e362 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ numpy matplotlib pandas -sympy<1.3 \ No newline at end of file +sympy<1.3 diff --git a/docs/run_apidoc b/docs/run_apidoc index 2b78826..3f6d1bd 100755 --- a/docs/run_apidoc +++ b/docs/run_apidoc @@ -1,3 +1,3 @@ rm -f source/api/*.rst sphinx-apidoc --force --separate -o . ../../ipypublish/ ../../ipypublish/ipypublish/tests ../../ipypublish/setup.py -rm -f source/api/modules.rst \ No newline at end of file +rm -f source/api/modules.rst diff --git a/docs/source/_static/example.bib b/docs/source/_static/example.bib index 3f1df55..5273a35 100644 --- a/docs/source/_static/example.bib +++ b/docs/source/_static/example.bib @@ -38,5 +38,3 @@ @article{zelenyak_molecular_2016 pages = {400--405}, file = {Zelenyak et al_2016_Molecular dynamics study of perovskite structures with modified interatomic.pdf:/Users/cjs14/Library/Application Support/Firefox/Profiles/gignsb3n.default/zotero/storage/H5NVC2I5/Zelenyak et al_2016_Molecular dynamics study of perovskite structures with modified interatomic.pdf:application/pdf} } - - diff --git a/docs/source/_static/example_glossary.bib b/docs/source/_static/example_glossary.bib index 81c2a8f..c7c8820 100644 --- a/docs/source/_static/example_glossary.bib +++ b/docs/source/_static/example_glossary.bib @@ -21,4 +21,4 @@ @glssymbol{symbol1 plural = {\ensuremath{\pi}s}, text = {alternative text}, sort = {b} -} \ No newline at end of file +} diff --git a/docs/source/_static/other_glossary.bib b/docs/source/_static/other_glossary.bib index 2fe4999..0895140 100644 --- a/docs/source/_static/other_glossary.bib +++ b/docs/source/_static/other_glossary.bib @@ -21,4 +21,4 @@ @glssymbol{symbol2 plural = {\ensuremath{\pi}s}, text = {alternative text}, sort = {b} -} \ No newline at end of file +} diff --git a/docs/source/_static/process.svg b/docs/source/_static/process.svg index b9d12d0..d5207fc 100755 --- a/docs/source/_static/process.svg +++ b/docs/source/_static/process.svg @@ -142,7 +142,7 @@ stroke-miterlimit: 7; diff --git a/docs/source/applications.md b/docs/source/applications.md index 6847995..e19c177 100644 --- a/docs/source/applications.md +++ b/docs/source/applications.md @@ -7,4 +7,4 @@ - 2019 Bogota Experimental Economics Workshop: [Intro to Dynamic documents](https://rebelbogota.github.io/beec2019/BEEW2019.html) - Lead by Berkeley Initiative for Transparency in the Social Sciences - See also 
[Experimetrics-BITSS-Workshop](https://github.com/lordflaron/Experimetrics-BITSS-Workshop) -- [Generating Software Tests eBook](https://www.fuzzingbook.org/) \ No newline at end of file +- [Generating Software Tests eBook](https://www.fuzzingbook.org/) diff --git a/docs/source/conf.py b/docs/source/conf.py index f935902..5379e99 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -33,38 +33,38 @@ import ipypublish from ipypublish.filters_pandoc.main import jinja_filter -on_rtd = os.environ.get('READTHEDOCS') == 'True' +on_rtd = os.environ.get("READTHEDOCS") == "True" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # -needs_sphinx = '1.6' +needs_sphinx = "1.6" # The master toctree document. -master_doc = 'index' +master_doc = "index" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.mathjax', - 'sphinx.ext.ifconfig', - 'sphinx.ext.viewcode', - 'sphinx.ext.githubpages', # TODO is this needed? - 'sphinx.ext.napoleon', - 'sphinx.ext.autosummary', + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", + "sphinx.ext.ifconfig", + "sphinx.ext.viewcode", + "sphinx.ext.githubpages", # TODO is this needed? + "sphinx.ext.napoleon", + "sphinx.ext.autosummary", # 'sphinx.ext.imgconverter' # converts svg to pdf in latex output # TODO imgconverter failing (I guess for process.svg), - 'ipypublish.sphinx.notebook', - 'ipypublish.sphinx.gls', - 'sphinxcontrib.bibtex', - 'recommonmark' + "ipypublish.sphinx.notebook", + "ipypublish.sphinx.gls", + "sphinxcontrib.bibtex", + "recommonmark", ] @@ -88,30 +88,33 @@ def process_citations(app, doctree, docname): try: label = app.env.bibtex_cache.get_label_from_key(key) except KeyError: - logger.warning("could not relabel citation [%s]" % key, - type="bibtex", subtype="relabel") + logger.warning( + "could not relabel citation [%s]" % key, + type="bibtex", + subtype="relabel", + ) else: - node[0] = docutils.nodes.label('', label) + node[0] = docutils.nodes.label("", label) sphinxcontrib.bibtex.process_citations = process_citations -suppress_warnings = ['bibtex.relabel'] +suppress_warnings = ["bibtex.relabel"] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] if sphinx.version_info[0:2] < (1, 8): source_parsers = { # '.md': 'recommonmark.parser.CommonMarkParser', - '.Rmd': 'ipypublish.sphinx.notebook.parser.NBParser' + ".Rmd": "ipypublish.sphinx.notebook.parser.NBParser" } else: source_suffix = { - '.rst': 'restructuredtext', - '.md': 'markdown', - '.ipynb': 'jupyter_notebook', - '.Rmd': 'jupyter_notebook' + ".rst": "restructuredtext", + ".md": "markdown", + ".ipynb": "jupyter_notebook", + ".Rmd": "jupyter_notebook", } # import jupytext # ipysphinx_preconverters = { @@ -126,11 +129,12 @@ def process_citations(app, doctree, docname): # General information about the project. 
-project = u'ipypublish' -copyright = u'2017, Chris Sewell' -author = u'Chris Sewell' -description = ('Create quality publication and presentation' - 'directly from Jupyter Notebook(s)') +project = u"ipypublish" +copyright = u"2017, Chris Sewell" +author = u"Chris Sewell" +description = ( + "Create quality publication and presentation" "directly from Jupyter Notebook(s)" +) # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -149,7 +153,7 @@ def process_citations(app, doctree, docname): language = None # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -170,14 +174,14 @@ def process_citations(app, doctree, docname): # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # html_logo = '_static/doc_icon_100px.png' -html_favicon = '_static/doc_icon_32px.ico' +html_favicon = "_static/doc_icon_32px.ico" # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'ipypublishdoc' +htmlhelp_basename = "ipypublishdoc" # -- Options for LaTeX output --------------------------------------------- @@ -185,15 +189,12 @@ def process_citations(app, doctree, docname): # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -203,18 +204,20 @@ def process_citations(app, doctree, docname): # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'ipypublish.tex', u'ipypublish Documentation', - u'Chris Sewell', 'manual'), + ( + master_doc, + "ipypublish.tex", + u"ipypublish Documentation", + u"Chris Sewell", + "manual", + ) ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'ipypublish', u'ipypublish Documentation', - [author], 1) -] +man_pages = [(master_doc, "ipypublish", u"ipypublish Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -222,24 +225,32 @@ def process_citations(app, doctree, docname): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'ipypublish', u'IPyPublish', - author, 'ipypublish', description, - 'Miscellaneous'), + ( + master_doc, + "ipypublish", + u"IPyPublish", + author, + "ipypublish", + description, + "Miscellaneous", + ) ] # Numbered Elements numfig = True math_numfig = True numfig_secnum_depth = 2 -numfig_format = {'section': 'Section %s', - 'figure': 'Fig. %s', - 'table': 'Table %s', - 'code-block': 'Code Block %s'} +numfig_format = { + "section": "Section %s", + "figure": "Fig. %s", + "table": "Table %s", + "code-block": "Code Block %s", +} math_number_all = True math_eqref_format = "Eq. 
{number}" # TODO this isn't working mathjax_config = { - 'TeX': {'equationNumbers': {'autoNumber': 'AMS', 'useLabelIds': True}}, + "TeX": {"equationNumbers": {"autoNumber": "AMS", "useLabelIds": True}} } # Napoleon Docstring settings @@ -258,89 +269,115 @@ def process_citations(app, doctree, docname): # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { - 'python': ('https://docs.python.org/3.6', None), + "python": ("https://docs.python.org/3.6", None), # 'numpy': ('http://docs.scipy.org/doc/numpy/', None), # 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None), # 'matplotlib': ('http://matplotlib.sourceforge.net/', None), # 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None), # 'IPython': ('http://ipython.org/ipython-doc/stable/', None), - 'PIL': ('http://pillow.readthedocs.org/en/latest/', None), - 'nbconvert': ("http://nbconvert.readthedocs.io/en/latest/", None), - 'nbformat': ("http://nbformat.readthedocs.io/en/latest/", None), - 'tornado': ("https://www.tornadoweb.org/en/stable/", None), - 'traitlets': ("https://traitlets.readthedocs.io/en/stable/", None), - 'jinja': ('http://jinja.pocoo.org/docs/dev', None), - 'bibtexparser': ('https://bibtexparser.readthedocs.io/en/master/', None), + "PIL": ("http://pillow.readthedocs.org/en/latest/", None), + "nbconvert": ("http://nbconvert.readthedocs.io/en/latest/", None), + "nbformat": ("http://nbformat.readthedocs.io/en/latest/", None), + "tornado": ("https://www.tornadoweb.org/en/stable/", None), + "traitlets": ("https://traitlets.readthedocs.io/en/stable/", None), + "jinja": ("http://jinja.pocoo.org/docs/dev", None), + "bibtexparser": ("https://bibtexparser.readthedocs.io/en/master/", None), # 'docutils': ("https://docutils.readthedocs.io/en/sphinx-docs", None), # # TODO docutils intersphinx - 'sphinx': ('http://www.sphinx-doc.org/en/latest/', None) + "sphinx": ("http://www.sphinx-doc.org/en/latest/", None), } intersphinx_aliases = { - ('py:class', 'dictionary'): - ('py:class', 'dict'), - ('py:class', 'PIL.Image'): - ('py:class', 'PIL.Image.Image'), - ('py:class', 'nbconvert.preprocessors.base.Preprocessor'): - ('py:class', 'nbconvert.preprocessors.Preprocessor'), - ('py:class', 'nbformat.notebooknode.NotebookNode'): - ('py:class', 'nbformat.NotebookNode'), - ('py:class', 'NotebookNode'): - ('py:class', 'nbformat.NotebookNode'), - ('py:class', 'traitlets.config.configurable.Configurable'): - ('py:module', 'traitlets.config') + ("py:class", "dictionary"): ("py:class", "dict"), + ("py:class", "PIL.Image"): ("py:class", "PIL.Image.Image"), + ("py:class", "nbconvert.preprocessors.base.Preprocessor"): ( + "py:class", + "nbconvert.preprocessors.Preprocessor", + ), + ("py:class", "nbformat.notebooknode.NotebookNode"): ( + "py:class", + "nbformat.NotebookNode", + ), + ("py:class", "NotebookNode"): ("py:class", "nbformat.NotebookNode"), + ("py:class", "traitlets.config.configurable.Configurable"): ( + "py:module", + "traitlets.config", + ), } # Warnings to ignore when using the -n (nitpicky) option # We should ignore any python built-in exception, for instance -nitpick_ignore = [('py:exc', 'ArithmeticError'), ('py:exc', 'AssertionError'), - ('py:exc', 'AttributeError'), ('py:exc', 'BaseException'), - ('py:exc', 'BufferError'), ('py:exc', 'DeprecationWarning'), - ('py:exc', 'EOFError'), ('py:exc', 'EnvironmentError'), - ('py:exc', 'Exception'), ('py:exc', 'FloatingPointError'), - ('py:exc', 'FutureWarning'), ('py:exc', 'GeneratorExit'), - ('py:exc', 'IOError'), ('py:exc', 
'ImportError'), - ('py:exc', 'ImportWarning'), ('py:exc', 'IndentationError'), - ('py:exc', 'IndexError'), ('py:exc', 'KeyError'), - ('py:exc', 'KeyboardInterrupt'), ('py:exc', 'LookupError'), - ('py:exc', 'MemoryError'), ('py:exc', 'NameError'), - ('py:exc', 'NotImplementedError'), ('py:exc', 'OSError'), - ('py:exc', 'OverflowError'), - ('py:exc', 'PendingDeprecationWarning'), - ('py:exc', 'ReferenceError'), ('py:exc', 'RuntimeError'), - ('py:exc', 'RuntimeWarning'), ('py:exc', 'StandardError'), - ('py:exc', 'StopIteration'), ('py:exc', 'SyntaxError'), - ('py:exc', 'SyntaxWarning'), ('py:exc', 'SystemError'), - ('py:exc', 'SystemExit'), ('py:exc', 'TabError'), - ('py:exc', 'TypeError'), ('py:exc', 'UnboundLocalError'), - ('py:exc', 'UnicodeDecodeError'), - ('py:exc', 'UnicodeEncodeError'), ('py:exc', 'UnicodeError'), - ('py:exc', 'UnicodeTranslateError'), - ('py:exc', 'UnicodeWarning'), ('py:exc', 'UserWarning'), - ('py:exc', 'VMSError'), ('py:exc', 'ValueError'), - ('py:exc', 'Warning'), ('py:exc', 'WindowsError'), - ('py:exc', 'ZeroDivisionError'), ('py:obj', 'str'), - ('py:obj', 'list'), - ('py:obj', 'tuple'), - ('py:obj', 'int'), - ('py:obj', 'float'), - ('py:obj', 'bool'), - ('py:obj', 'Mapping'), - ('py:obj', 'MutableMapping'), - ('py:func', 'str.format'), - ('py:class', '_abcoll.MutableMapping'), - ('py:class', - 'traitlets.config.configurable.LoggingConfigurable'), - ('py:class', 'docutils.nodes.Element'), - ('py:class', 'docutils.nodes.General'), - ('py:class', 'docutils.nodes.document'), - ('py:class', 'docutils.parsers.rst.Directive'), - ('py:class', 'docutils.transforms.Transform'), - ('py:class', 'docutils.parsers.rst.Parser'), - ('py:class', 'sphinx.parsers.RSTParser'), - ('py:class', 'sphinx.roles.XRefRole'), - ('py:exc', 'nbconvert.pandoc.PandocMissing') - ] +nitpick_ignore = [ + ("py:exc", "ArithmeticError"), + ("py:exc", "AssertionError"), + ("py:exc", "AttributeError"), + ("py:exc", "BaseException"), + ("py:exc", "BufferError"), + ("py:exc", "DeprecationWarning"), + ("py:exc", "EOFError"), + ("py:exc", "EnvironmentError"), + ("py:exc", "Exception"), + ("py:exc", "FloatingPointError"), + ("py:exc", "FutureWarning"), + ("py:exc", "GeneratorExit"), + ("py:exc", "IOError"), + ("py:exc", "ImportError"), + ("py:exc", "ImportWarning"), + ("py:exc", "IndentationError"), + ("py:exc", "IndexError"), + ("py:exc", "KeyError"), + ("py:exc", "KeyboardInterrupt"), + ("py:exc", "LookupError"), + ("py:exc", "MemoryError"), + ("py:exc", "NameError"), + ("py:exc", "NotImplementedError"), + ("py:exc", "OSError"), + ("py:exc", "OverflowError"), + ("py:exc", "PendingDeprecationWarning"), + ("py:exc", "ReferenceError"), + ("py:exc", "RuntimeError"), + ("py:exc", "RuntimeWarning"), + ("py:exc", "StandardError"), + ("py:exc", "StopIteration"), + ("py:exc", "SyntaxError"), + ("py:exc", "SyntaxWarning"), + ("py:exc", "SystemError"), + ("py:exc", "SystemExit"), + ("py:exc", "TabError"), + ("py:exc", "TypeError"), + ("py:exc", "UnboundLocalError"), + ("py:exc", "UnicodeDecodeError"), + ("py:exc", "UnicodeEncodeError"), + ("py:exc", "UnicodeError"), + ("py:exc", "UnicodeTranslateError"), + ("py:exc", "UnicodeWarning"), + ("py:exc", "UserWarning"), + ("py:exc", "VMSError"), + ("py:exc", "ValueError"), + ("py:exc", "Warning"), + ("py:exc", "WindowsError"), + ("py:exc", "ZeroDivisionError"), + ("py:obj", "str"), + ("py:obj", "list"), + ("py:obj", "tuple"), + ("py:obj", "int"), + ("py:obj", "float"), + ("py:obj", "bool"), + ("py:obj", "Mapping"), + ("py:obj", "MutableMapping"), + ("py:func", 
"str.format"), + ("py:class", "_abcoll.MutableMapping"), + ("py:class", "traitlets.config.configurable.LoggingConfigurable"), + ("py:class", "docutils.nodes.Element"), + ("py:class", "docutils.nodes.General"), + ("py:class", "docutils.nodes.document"), + ("py:class", "docutils.parsers.rst.Directive"), + ("py:class", "docutils.transforms.Transform"), + ("py:class", "docutils.parsers.rst.Parser"), + ("py:class", "sphinx.parsers.RSTParser"), + ("py:class", "sphinx.roles.XRefRole"), + ("py:exc", "nbconvert.pandoc.PandocMissing"), +] try: out = subprocess.check_output(["git", "branch"]).decode("utf8") @@ -378,27 +415,31 @@ def process_citations(app, doctree, docname): {{%- endif %}} __ https://github.com/chrisjsewell/ipypublish/{gitpath}/{{{{ docname }}}} -""".format(gitpath=gitpath, binderpath=binderpath) # noqa: E501 +""".format( # noqa: E501 + gitpath=gitpath, binderpath=binderpath +) def create_git_releases(app): - this_folder = os.path.abspath( - os.path.dirname(os.path.realpath(__file__))) + this_folder = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) - git_history = urllib.request.urlopen( - 'https://api.github.com/repos/chrisjsewell/ipypublish/releases' - ).read().decode('utf-8') + git_history = ( + urllib.request.urlopen( + "https://api.github.com/repos/chrisjsewell/ipypublish/releases" + ) + .read() + .decode("utf-8") + ) # NOTE on vscode this could fail with urllib.error.HTTPError git_history_json = json.loads(git_history) # NOTE on vscode this was failing unless encoding='utf8' was present - with io.open(os.path.join(this_folder, 'releases.rst'), - 'w', encoding="utf8") as f: - f.write('.. _releases:\n\n') - f.write('Releases\n') - f.write('========\n\n') + with io.open(os.path.join(this_folder, "releases.rst"), "w", encoding="utf8") as f: + f.write(".. 
_releases:\n\n") + f.write("Releases\n") + f.write("========\n\n") for i, r in enumerate(git_history_json): - if r['tag_name'].split(".")[-1] == "0": + if r["tag_name"].split(".")[-1] == "0": level = 2 elif i == 0: f.write("Current Version\n") @@ -406,22 +447,23 @@ def create_git_releases(app): level = 3 else: level = 3 - subtitle = ' '.join([r['tag_name'], '-', r['name'].rstrip(), '\n']) + subtitle = " ".join([r["tag_name"], "-", r["name"].rstrip(), "\n"]) f.write(subtitle) if level == 2: - f.write("-" * (len(subtitle)-1)+"\n") + f.write("-" * (len(subtitle) - 1) + "\n") else: - f.write("~" * (len(subtitle)-1)+"\n") - f.write('\n') - source = jinja_filter(r['body'], "rst", {}, {}) - for line in source.split('\n'): - f.write(' '.join([line.rstrip(), '\n'])) - f.write('\n') + f.write("~" * (len(subtitle) - 1) + "\n") + f.write("\n") + source = jinja_filter(r["body"], "rst", {}, {}) + for line in source.split("\n"): + f.write(" ".join([line.rstrip(), "\n"])) + f.write("\n") def add_intersphinx_aliases_to_inv(app): """see https://github.com/sphinx-doc/sphinx/issues/5603""" from sphinx.ext.intersphinx import InventoryAdapter + inventories = InventoryAdapter(app.builder.env) for alias, target in app.config.intersphinx_aliases.items(): @@ -444,22 +486,20 @@ def run_apidoc(app): """ logger.info("running apidoc") # get correct paths - this_folder = os.path.abspath( - os.path.dirname(os.path.realpath(__file__))) + this_folder = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) api_folder = os.path.join(this_folder, "api") # module_path = ipypublish.utils.get_module_path(ipypublish) - module_path = os.path.normpath( - os.path.join(this_folder, "../../")) + module_path = os.path.normpath(os.path.join(this_folder, "../../")) ignore_paths = [ "../../setup.py", "../../conftest.py", "../../ipypublish/tests", - "../../ipypublish/sphinx/tests" + "../../ipypublish/sphinx/tests", ] ignore_paths = [ - os.path.normpath( - os.path.join(this_folder, p)) for p in ignore_paths] + os.path.normpath(os.path.join(this_folder, p)) for p in ignore_paths + ] if os.path.exists(api_folder): shutil.rmtree(api_folder) @@ -473,6 +513,7 @@ def run_apidoc(app): except ImportError: # Sphinx 1.6 (and earlier) from sphinx import apidoc + argv.insert(0, apidoc.__file__) apidoc.main(argv) @@ -485,13 +526,13 @@ def run_apidoc(app): def get_version(): """alternative to getting directly""" import re + this_folder = os.path.join(os.path.dirname(os.path.realpath(__file__))) init_file = os.path.join(this_folder, "../../ipypublish/__init__.py") with open(init_file) as fobj: content = fobj.read() - match = re.match( - "\\_\\_version\\_\\_\\s*\\=\\s*[\\'\\\"]([0-9\\.]+)", content) + match = re.match("\\_\\_version\\_\\_\\s*\\=\\s*[\\'\\\"]([0-9\\.]+)", content) if not match: raise IOError("couldn't find __version__ in: {}".format(init_file)) return match.group(1) @@ -506,7 +547,7 @@ def setup(app): # app.connect('autodoc-skip-member', skip_deprecated) # add aliases for intersphinx - app.add_config_value('intersphinx_aliases', {}, 'env') - app.connect('builder-inited', run_apidoc) + app.add_config_value("intersphinx_aliases", {}, "env") + app.connect("builder-inited", run_apidoc) # app.connect('builder-inited', create_git_releases) - app.connect('builder-inited', add_intersphinx_aliases_to_inv) + app.connect("builder-inited", add_intersphinx_aliases_to_inv) diff --git a/docs/source/dev_guide.rst b/docs/source/dev_guide.rst index f007ce2..eddfb13 100644 --- a/docs/source/dev_guide.rst +++ b/docs/source/dev_guide.rst @@ -36,24 +36,23 
@@ Coding Style Requirements
 The code style is tested using `flake8 `__,
 with the configuration set in ``.flake8``, and code should be formatted
-with `yapf `__ (configuration set in
-``.style.yapf``).
+with `black `__.
 
-Installing with ``ipypublish[tests]`` makes the
+Installing with ``ipypublish[code_style]`` makes the
 `pre-commit `__ package available, which will
 ensure these tests are passed by reformatting the code and testing for
-lint errors before submitting a commit. It can be setup by:
+lint errors before submitting a commit. It can be set up by:
 
 .. code:: shell
 
    >> cd ipypublish
    >> pre-commit install
 
-Optionally you can run ``yapf`` and ``flake8`` separately:
+Optionally you can run ``black`` and ``flake8`` separately:
 
 .. code:: shell
 
-   >> yapf -i path/to/file # format file in-place
+   >> black path/to/file
    >> flake8
 
 Editors like VS Code also have automatic code reformat utilities, which
diff --git a/docs/source/getting_started.Rmd b/docs/source/getting_started.Rmd
index 4272b43..7575559 100644
--- a/docs/source/getting_started.Rmd
+++ b/docs/source/getting_started.Rmd
@@ -139,7 +139,7 @@ For a more detailed explanation see the
 .. important::
 
-    The default conversion (``latex_ipypublish_main``) will **NOT** 
+    The default conversion (``latex_ipypublish_main``) will **NOT**
     output any cells that are not tagged with metadata.
     To output all notebook content by default, use ``_ipypublish_all``.
diff --git a/docs/source/nb_conversion.rst b/docs/source/nb_conversion.rst
index 26bf0bd..ee0f2f0 100644
--- a/docs/source/nb_conversion.rst
+++ b/docs/source/nb_conversion.rst
@@ -171,14 +171,17 @@ Simple Customisation of Outputs
 To customise the output of the above defaults, simply download one of:
 
-- :download:`latex_ipypublish_all.json <../../ipypublish/export_plugins/latex_ipypublish_all.json>`.
-- :download:`sphinx_ipypublish_all.json <../../ipypublish/export_plugins/html_ipypublish_all.json>`.
-- :download:`html_ipypublish_all.json <../../ipypublish/export_plugins/html_ipypublish_all.json>`.
-- :download:`slides_ipypublish_all.json <../../ipypublish/export_plugins/slides_ipypublish_all.json>`.
+- :download:`latex_ipypublish_all.json
+  <../../ipypublish/export_plugins/latex_ipypublish_all.json>`.
+- :download:`sphinx_ipypublish_all.json
+  <../../ipypublish/export_plugins/html_ipypublish_all.json>`.
+- :download:`html_ipypublish_all.json
+  <../../ipypublish/export_plugins/html_ipypublish_all.json>`.
+- :download:`slides_ipypublish_all.json
+  <../../ipypublish/export_plugins/slides_ipypublish_all.json>`.
 
 Then alter the ``cell_defaults`` and ``nb_defaults`` sections, and run:
 
 .. code-block:: console
 
     nbpublish -f path/to/new_config.json input.ipynb
-
diff --git a/docs/source/package_api.rst b/docs/source/package_api.rst
index 886d206..9f887ab 100644
--- a/docs/source/package_api.rst
+++ b/docs/source/package_api.rst
@@ -4,4 +4,4 @@ Package API
 .. toctree::
    :maxdepth: 3
 
-   api/ipypublish
\ No newline at end of file
+   api/ipypublish
diff --git a/docs/source/sphinx_ext_bibgloss.rst b/docs/source/sphinx_ext_bibgloss.rst
index bddd2e2..ff5b0b8 100644
--- a/docs/source/sphinx_ext_bibgloss.rst
+++ b/docs/source/sphinx_ext_bibgloss.rst
@@ -299,9 +299,3 @@ Entries have attributes for the main fields, and can output to latex.
symbol={\ensuremath{n}}, text={alternative text} } - - - - - - diff --git a/ipypublish/__init__.py b/ipypublish/__init__.py index 70d683f..ec4817e 100644 --- a/ipypublish/__init__.py +++ b/ipypublish/__init__.py @@ -1,3 +1,3 @@ from ipypublish.scripts import nb_setup # noqa: F401 -__version__ = '0.10.9' +__version__ = "0.10.9" diff --git a/ipypublish/bib2glossary/__init__.py b/ipypublish/bib2glossary/__init__.py index 33475f6..18d7f9b 100644 --- a/ipypublish/bib2glossary/__init__.py +++ b/ipypublish/bib2glossary/__init__.py @@ -1,2 +1 @@ -from ipypublish.bib2glossary.classes import ( # noqa: F401 - BibGlossEntry, BibGlossDB) +from ipypublish.bib2glossary.classes import BibGlossEntry, BibGlossDB # noqa: F401 diff --git a/ipypublish/bib2glossary/classes.py b/ipypublish/bib2glossary/classes.py index 920f19b..0cceaf9 100644 --- a/ipypublish/bib2glossary/classes.py +++ b/ipypublish/bib2glossary/classes.py @@ -6,8 +6,11 @@ import bibtexparser from ipypublish.bib2glossary.definitions import ( - ETYPE_GLOSS, ETYPE_ACRONYM, ETYPE_SYMBOL, - NEWGLOSS_FIELDS, NEWACRONYM_FIELDS + ETYPE_GLOSS, + ETYPE_ACRONYM, + ETYPE_SYMBOL, + NEWGLOSS_FIELDS, + NEWACRONYM_FIELDS, ) try: @@ -19,9 +22,7 @@ class BibGlossEntry(object): - _allowed_types = ( - ETYPE_GLOSS, ETYPE_ACRONYM, ETYPE_SYMBOL - ) + _allowed_types = (ETYPE_GLOSS, ETYPE_ACRONYM, ETYPE_SYMBOL) def __init__(self, entry_dict): @@ -29,34 +30,32 @@ def __init__(self, entry_dict): self._entry_dict = entry_dict def _validate_dict(self, dct): - if 'ID' not in dct: + if "ID" not in dct: raise KeyError - if 'ENTRYTYPE' not in dct: + if "ENTRYTYPE" not in dct: raise KeyError - if dct['ENTRYTYPE'] not in self._allowed_types: - raise TypeError( - 'ENTRYTYPE must be one of: {}'.format(self._allowed_types)) + if dct["ENTRYTYPE"] not in self._allowed_types: + raise TypeError("ENTRYTYPE must be one of: {}".format(self._allowed_types)) - if dct['ENTRYTYPE'] == ETYPE_ACRONYM: - if 'abbreviation' not in dct or 'longname' not in dct: + if dct["ENTRYTYPE"] == ETYPE_ACRONYM: + if "abbreviation" not in dct or "longname" not in dct: raise KeyError - elif (dct['ENTRYTYPE'] == ETYPE_GLOSS - or dct['ENTRYTYPE'] == ETYPE_SYMBOL): - if 'name' not in dct or 'description' not in dct: + elif dct["ENTRYTYPE"] == ETYPE_GLOSS or dct["ENTRYTYPE"] == ETYPE_SYMBOL: + if "name" not in dct or "description" not in dct: raise KeyError def _get_key(self): - return self._entry_dict['ID'] + return self._entry_dict["ID"] def _set_key(self, key): - self._entry_dict['ID'] = key + self._entry_dict["ID"] = key key = property(_get_key, _set_key) @property def type(self): - return self._entry_dict['ENTRYTYPE'] + return self._entry_dict["ENTRYTYPE"] def __contains__(self, key): return key in self._entry_dict @@ -67,11 +66,11 @@ def get(self, key): @property def label(self): if self.type == ETYPE_ACRONYM: - return self.get('abbreviation') + return self.get("abbreviation") elif self.type == ETYPE_GLOSS: - return self.get('name') + return self.get("name") elif self.type == ETYPE_SYMBOL: - return self.get('name') + return self.get("name") else: raise NotImplementedError @@ -84,19 +83,19 @@ def sortkey(self): @property def plural(self): - if 'plural' in self: - return self.get('plural') + if "plural" in self: + return self.get("plural") else: return "{}s".format(self.label) @property def text(self): if self.type == ETYPE_ACRONYM: - return self.get('longname') + return self.get("longname") elif self.type == ETYPE_GLOSS: - return self.get('description') + return self.get("description") elif self.type == 
ETYPE_SYMBOL: - return self.get('description') + return self.get("description") else: raise NotImplementedError @@ -112,22 +111,22 @@ def to_latex(self): options = [] for field in sorted(NEWGLOSS_FIELDS): if field in self: - options.append("{0}={{{1}}}".format( - field, self.get(field))) + options.append("{0}={{{1}}}".format(field, self.get(field))) if self.type == ETYPE_SYMBOL: options.append("type={symbols}") body = "{{{key}}}{{\n {options}\n}}".format( - key=self.key, options=",\n ".join(options)) + key=self.key, options=",\n ".join(options) + ) return "\\newglossaryentry" + body elif self.type == ETYPE_ACRONYM: body = "{{{key}}}{{{abbrev}}}{{{long}}}".format( - key=self.key, abbrev=self.label, long=self.text) + key=self.key, abbrev=self.label, long=self.text + ) options = [] for field in sorted(NEWACRONYM_FIELDS): if field in self: - options.append("{0}={{{1}}}".format( - field, self.get(field))) + options.append("{0}={{{1}}}".format(field, self.get(field))) if options: body = "[" + ",".join(options) + "]" + body @@ -135,7 +134,6 @@ def to_latex(self): class BibGlossDB(MutableMapping): - def __init__(self): self._entries = {} @@ -144,9 +142,9 @@ def __getitem__(self, key): def __setitem__(self, key, entry): if not isinstance(entry, BibGlossEntry): - raise ValueError('value must be a BibGlossEntry') + raise ValueError("value must be a BibGlossEntry") if key != entry.key: - raise ValueError('key must equal entry.key') + raise ValueError("key must equal entry.key") self._entries[key] = entry def __delitem__(self, key): @@ -160,16 +158,19 @@ def __len__(self): @staticmethod def get_fake_entry_obj(key): - return BibGlossEntry({ - 'ENTRYTYPE': ETYPE_GLOSS, - 'ID': key, - 'name': key, - 'description': '' - }) - - def load_bib(self, text_str=None, path=None, bibdb=None, encoding="utf8", - ignore_nongloss_types=False, - ignore_duplicates=False): + return BibGlossEntry( + {"ENTRYTYPE": ETYPE_GLOSS, "ID": key, "name": key, "description": ""} + ) + + def load_bib( + self, + text_str=None, + path=None, + bibdb=None, + encoding="utf8", + ignore_nongloss_types=False, + ignore_duplicates=False, + ): """load a bib file Parameters @@ -191,16 +192,14 @@ def load_bib(self, text_str=None, path=None, bibdb=None, encoding="utf8", bib = None if sum([e is not None for e in [text_str, path, bibdb]]) != 1: - raise ValueError( - "only one of text_str, path or bib must be supplied") + raise ValueError("only one of text_str, path or bib must be supplied") if bibdb is not None: if not isinstance(bibdb, bibtexparser.bibdatabase.BibDatabase): raise ValueError("bib is not a BibDatabase instance") bib = bibdb elif path is not None: if text_str is not None: - raise ValueError( - 'text_str and path cannot be set at the same time') + raise ValueError("text_str and path cannot be set at the same time") with io.open(path, encoding=encoding) as fobj: text_str = fobj.read() @@ -219,19 +218,20 @@ def load_bib(self, text_str=None, path=None, bibdb=None, encoding="utf8", entry = BibGlossEntry(entry_dict) except TypeError: if ignore_nongloss_types: - logger.warning('Skipping non-glossary entry') + logger.warning("Skipping non-glossary entry") continue else: raise if entry.key in entries: if ignore_duplicates: - logger.warning('Skipping duplicate key entry') + logger.warning("Skipping duplicate key entry") continue else: raise KeyError( "the bib file contains " - "multiple entries with the key: {}".format(entry.key)) + "multiple entries with the key: {}".format(entry.key) + ) entries[entry.key] = entry @@ -240,8 +240,14 @@ def 
load_bib(self, text_str=None, path=None, bibdb=None, encoding="utf8", return True - def load_tex(self, text_str=None, path=None, encoding='utf8', - skip_ioerrors=False, ignore_unknown_types=True): + def load_tex( + self, + text_str=None, + path=None, + encoding="utf8", + skip_ioerrors=False, + ignore_unknown_types=True, + ): """load a tex file Parameters @@ -268,9 +274,9 @@ def load_tex(self, text_str=None, path=None, encoding='utf8', """ from ipypublish.bib2glossary.parse_tex import parse_tex + gterms, acronyms = parse_tex( - text_str=text_str, path=path, encoding=encoding, - skip_ioerrors=skip_ioerrors + text_str=text_str, path=path, encoding=encoding, skip_ioerrors=skip_ioerrors ) entries = {} for key, fields in gterms.items(): @@ -281,8 +287,9 @@ def load_tex(self, text_str=None, path=None, encoding='utf8', fields.pop("type") elif "type" in fields: if not ignore_unknown_types: - raise ValueError("the 'type' is not recognised: " - "{}".format(fields['type'])) + raise ValueError( + "the 'type' is not recognised: " "{}".format(fields["type"]) + ) fields.pop("type") fields["ID"] = key @@ -322,7 +329,7 @@ def guess_path(path): else: return None - def load(self, path, encoding='utf8'): + def load(self, path, encoding="utf8"): """load a file, the type will be guessed from the extension, or (if no extension is given), the available files in the path folder @@ -335,8 +342,7 @@ def load(self, path, encoding='utf8'): """ path = self.guess_path(path) if path is None: - raise IOError( - "no acceptable loader found for path: {}".format(path)) + raise IOError("no acceptable loader found for path: {}".format(path)) basepath, extension = os.path.splitext(str(path)) if extension in [".bib", ".biblatex", ".bibtex"]: self.load_bib(path=path, encoding=encoding) @@ -350,8 +356,8 @@ def to_bib_string(self): bibdb = bibtexparser.bibdatabase.BibDatabase() bibdb.entries = [e.to_dict() for e in self.values()] writer = bibtexparser.bwriter.BibTexWriter() - writer.contents = ['comments', 'entries'] - writer.indent = ' ' + writer.contents = ["comments", "entries"] + writer.indent = " " # writer.order_entries_by = ('ENTRYTYPE', 'ID') return writer.write(bibdb) @@ -369,8 +375,7 @@ def to_latex_dict(self, splitlines=True): string = entry.to_latex() if splitlines: string = string.splitlines() - latex_stings[ - (entry.type, entry.key)] = string + latex_stings[(entry.type, entry.key)] = string return latex_stings def to_latex_string(self): diff --git a/ipypublish/bib2glossary/definitions.py b/ipypublish/bib2glossary/definitions.py index 924ee8b..3b32757 100644 --- a/ipypublish/bib2glossary/definitions.py +++ b/ipypublish/bib2glossary/definitions.py @@ -1,14 +1,10 @@ -ETYPE_GLOSS = 'glsterm' -ETYPE_ACRONYM = 'glsacronym' -ETYPE_SYMBOL = 'glssymbol' +ETYPE_GLOSS = "glsterm" +ETYPE_ACRONYM = "glsacronym" +ETYPE_SYMBOL = "glssymbol" -NEWGLOSS_FIELDS = ( - "name", "description", "plural", "symbol", "text", "sort" -) +NEWGLOSS_FIELDS = ("name", "description", "plural", "symbol", "text", "sort") -NEWACRONYM_FIELDS = ( - "description", "plural", "longplural", "firstplural" -) +NEWACRONYM_FIELDS = ("description", "plural", "longplural", "firstplural") # TODO allow mapping diff --git a/ipypublish/bib2glossary/parse_tex.py b/ipypublish/bib2glossary/parse_tex.py index e042004..c36013c 100644 --- a/ipypublish/bib2glossary/parse_tex.py +++ b/ipypublish/bib2glossary/parse_tex.py @@ -14,15 +14,18 @@ def import_texsoup(): raise ImportError( "to parse tex files, TexSoup must be installed: \n" "pip install texsoup\n" - "conda install -c 
conda-forge texsoup") + "conda install -c conda-forge texsoup" + ) except SyntaxError: - raise ImportError('TexSoup package is broken on python 2.7, ' - 'so can not be imported for tex parsing') + raise ImportError( + "TexSoup package is broken on python 2.7, " + "so can not be imported for tex parsing" + ) return { "TexSoup": TexSoup, "RArg": RArg, "OArg": OArg, - "TokenWithPosition": TokenWithPosition + "TokenWithPosition": TokenWithPosition, } @@ -40,8 +43,7 @@ def extract_required_val(rarg): """extract the value of a TexSoup RArg""" RArg = import_texsoup()["RArg"] if not isinstance(rarg, RArg): - raise ValueError( - "expected {} to be a required argument".format(type(rarg))) + raise ValueError("expected {} to be a required argument".format(type(rarg))) return rarg.value @@ -62,18 +64,19 @@ def _extract_parameters(texsoup_exprs): if param_name is None: errors.append( "expected expression " - "'{}' to precede a parameter name".format(expr)) + "'{}' to precede a parameter name".format(expr) + ) break if param_name in params: - errors.append( - "parameter '{}' already defined".format(param_name)) + errors.append("parameter '{}' already defined".format(param_name)) else: params[param_name] = expr.value param_name = None else: errors.append( - "expected expression '{}' ".format(expr) + - "to be a parameter name or required argument") + "expected expression '{}' ".format(expr) + + "to be a parameter name or required argument" + ) break if param_name is not None: @@ -90,7 +93,8 @@ def extract_parameters(argument): OArg = import_texsoup()["OArg"] if not isinstance(argument, (OArg, RArg)): raise ValueError( - "expected {} to be of type OArg or RArg".format(type(argument))) + "expected {} to be of type OArg or RArg".format(type(argument)) + ) opt_params, errors = _extract_parameters(argument.exprs) @@ -105,7 +109,8 @@ def create_newgloss_dict(gterm, row=None): if len(arguments) != 2: msg = _create_msg_error( - "could not parse newglossaryterm (arguments != 2)", gterm, row) + "could not parse newglossaryterm (arguments != 2)", gterm, row + ) raise IOError(msg) key = extract_required_val(arguments[0]) @@ -114,16 +119,16 @@ def create_newgloss_dict(gterm, row=None): for error in errors: msg = _create_msg_error( - "error reading 'parameter' block: {}".format(error), - gterm, row) + "error reading 'parameter' block: {}".format(error), gterm, row + ) raise IOError(msg) for param_name, param_value in params.items(): if param_name in fields: raise IOError( - "duplicate parameter '{0}' in key '{1}'".format( - param_name, key)) + "duplicate parameter '{0}' in key '{1}'".format(param_name, key) + ) fields[param_name] = param_value @@ -140,11 +145,13 @@ def create_newacronym_dict(acronym, row=None): if len(arguments) < 3: msg = _create_msg_error( - "could not parse newacronym (too few arguments)", acronym, row) + "could not parse newacronym (too few arguments)", acronym, row + ) raise IOError(msg) if len(arguments) > 4: msg = _create_msg_error( - "could not parse newacronym (too many arguments)", acronym, row) + "could not parse newacronym (too many arguments)", acronym, row + ) raise IOError(msg) key = extract_required_val(arguments[-3]) @@ -156,8 +163,8 @@ def create_newacronym_dict(acronym, row=None): if not isinstance(options, OArg): msg = _create_msg_error( - "expected first argument of newacronym to be 'optional", - acronym, row) + "expected first argument of newacronym to be 'optional", acronym, row + ) raise IOError(msg) opt_params, errors = extract_parameters(options) @@ -165,22 +172,29 @@ def 
create_newacronym_dict(acronym, row=None): for error in errors: msg = _create_msg_error( "error reading newacronym 'optional' block: {}".format(error), - acronym, row) + acronym, + row, + ) raise IOError(msg) for opt_name, opt_value in opt_params.items(): if opt_name in fields: raise IOError( - "duplicate parameter '{0}' in key '{1}'".format( - opt_name, key)) + "duplicate parameter '{0}' in key '{1}'".format(opt_name, key) + ) fields[opt_name] = opt_value return key, abbreviation, name, fields -def parse_tex(text_str=None, path=None, encoding='utf8', - abbrev_field="abbreviation", fname_field="longname", - skip_ioerrors=False): +def parse_tex( + text_str=None, + path=None, + encoding="utf8", + abbrev_field="abbreviation", + fname_field="longname", + skip_ioerrors=False, +): """parse a tex file containing newglossaryentry and/or newacronym to dict Parameters @@ -212,8 +226,7 @@ def parse_tex(text_str=None, path=None, encoding='utf8', raise ValueError("only one of text_str or path must be supplied") elif path is not None: if text_str is not None: - raise ValueError( - 'text_str and path cannot be set at the same time') + raise ValueError("text_str and path cannot be set at the same time") with io.open(path, encoding=encoding) as fobj: text_str = fobj.read() diff --git a/ipypublish/bib2glossary/test_bib2gloss.py b/ipypublish/bib2glossary/test_bib2gloss.py index 6ad05c8..c821dda 100644 --- a/ipypublish/bib2glossary/test_bib2gloss.py +++ b/ipypublish/bib2glossary/test_bib2gloss.py @@ -53,37 +53,36 @@ def test_load_bib_type_error(): bibgloss = BibGlossDB() with pytest.raises(TypeError): - bibgloss.load_bib( - text_str=dedent(bib_str), ignore_nongloss_types=False) + bibgloss.load_bib(text_str=dedent(bib_str), ignore_nongloss_types=False) def test_load_bib_type_ignore(): bibgloss = BibGlossDB() bibgloss.load_bib(text_str=dedent(bib_str), ignore_nongloss_types=True) - assert set(bibgloss.keys()) == { - 'gtkey1', 'gtkey2', 'akey1', 'akey2', 'skey1'} + assert set(bibgloss.keys()) == {"gtkey1", "gtkey2", "akey1", "akey2", "skey1"} @pytest.mark.skipif( sys.version_info < (3, 0), - reason="SyntaxError on import of texsoup/data.py line 135") + reason="SyntaxError on import of texsoup/data.py line 135", +) def test_load_tex(): bibgloss = BibGlossDB() bibgloss.load_tex(text_str=dedent(tex_str)) assert {k: e.type for k, e in bibgloss.items()} == { - 'gtkey1': 'glsterm', - 'akey1': 'glsacronym', - 'skey1': 'glssymbol'} + "gtkey1": "glsterm", + "akey1": "glsacronym", + "skey1": "glssymbol", + } def test_to_dict(): bibgloss = BibGlossDB() bibgloss.load_bib(text_str=dedent(bib_str), ignore_nongloss_types=True) dct = bibgloss.to_dict() - assert set(dct.keys()) == { - 'gtkey1', 'gtkey2', 'akey1', 'akey2', 'skey1'} + assert set(dct.keys()) == {"gtkey1", "gtkey2", "akey1", "akey2", "skey1"} def test_to_bib_string(): @@ -93,7 +92,7 @@ def test_to_bib_string(): assert re.search( "@glsacronym\\{akey1,.*@glsterm\\{gtkey1,.*@glssymbol\\{skey1.*", string, - re.DOTALL + re.DOTALL, ) @@ -103,31 +102,29 @@ def test_to_latex_dict(): latex_dict = bibgloss.to_latex_dict() print(latex_dict) assert latex_dict == { - ('glsacronym', - 'akey1'): [( - '\\newacronym[description={a description}]{' - 'akey1}{ABRV}{Abbreviation}')], - ('glsacronym', - 'akey2'): [( - '\\newacronym[plural={OTHERs}]{' - 'akey2}{OTHER}{Abbrev of other}')], - ('glsterm', - 'gtkey1'): [ - '\\newglossaryentry{gtkey1}{', - ' description={the description},', - ' name={name}', - '}'], - ('glsterm', - 'gtkey2'): [ - '\\newglossaryentry{gtkey2}{', - ' 
description={the description of other},', - ' name={other name}', - '}'], - ('glssymbol', - 'skey1'): [ - '\\newglossaryentry{skey1}{', - ' description={the description of symbol},', - ' name={\\pi},', - ' type={symbols}', - '}'] + ("glsacronym", "akey1"): [ + ("\\newacronym[description={a description}]{" "akey1}{ABRV}{Abbreviation}") + ], + ("glsacronym", "akey2"): [ + ("\\newacronym[plural={OTHERs}]{" "akey2}{OTHER}{Abbrev of other}") + ], + ("glsterm", "gtkey1"): [ + "\\newglossaryentry{gtkey1}{", + " description={the description},", + " name={name}", + "}", + ], + ("glsterm", "gtkey2"): [ + "\\newglossaryentry{gtkey2}{", + " description={the description of other},", + " name={other name}", + "}", + ], + ("glssymbol", "skey1"): [ + "\\newglossaryentry{skey1}{", + " description={the description of symbol},", + " name={\\pi},", + " type={symbols}", + "}", + ], } diff --git a/ipypublish/bib2glossary/test_parse_tex.py b/ipypublish/bib2glossary/test_parse_tex.py index 34514ce..ba78ac9 100644 --- a/ipypublish/bib2glossary/test_parse_tex.py +++ b/ipypublish/bib2glossary/test_parse_tex.py @@ -7,7 +7,8 @@ @pytest.mark.skipif( sys.version_info < (3, 0), - reason="SyntaxError on import of texsoup/data.py line 135") + reason="SyntaxError on import of texsoup/data.py line 135", +) def test_parse_acronyms(): text_str = """ @@ -17,18 +18,15 @@ def test_parse_acronyms(): gterms, acronyms = parse_tex(text_str=text_str) assert gterms == {} assert acronyms == { - 'otherkey': { - 'abbreviation': 'OTHER', - 'longname': 'Abbreviation of other'}, - 'thekey': { - 'abbreviation': 'ABRV', - 'longname': 'Abbreviation'} + "otherkey": {"abbreviation": "OTHER", "longname": "Abbreviation of other"}, + "thekey": {"abbreviation": "ABRV", "longname": "Abbreviation"}, } @pytest.mark.skipif( sys.version_info < (3, 0), - reason="SyntaxError on import of texsoup/data.py line 135") + reason="SyntaxError on import of texsoup/data.py line 135", +) def test_parse_acronyms_with_options(): text_str = """ @@ -38,21 +36,24 @@ def test_parse_acronyms_with_options(): gterms, acronyms = parse_tex(text_str=text_str) assert gterms == {} assert acronyms == { - 'otherkey': { - 'abbreviation': 'OTHER', - 'longname': 'Abbreviation of other', - 'description': 'a description'}, - 'thekey': { - 'abbreviation': 'ABRV', - 'longname': 'Abbreviation', - 'longplural': 'Abbreviations', - 'plural': 'ABRVs'} + "otherkey": { + "abbreviation": "OTHER", + "longname": "Abbreviation of other", + "description": "a description", + }, + "thekey": { + "abbreviation": "ABRV", + "longname": "Abbreviation", + "longplural": "Abbreviations", + "plural": "ABRVs", + }, } @pytest.mark.skipif( sys.version_info < (3, 0), - reason="SyntaxError on import of texsoup/data.py line 135") + reason="SyntaxError on import of texsoup/data.py line 135", +) def test_parse_gterms(): text_str = """ @@ -69,19 +70,15 @@ def test_parse_gterms(): gterms, acronyms = parse_tex(text_str=text_str) assert acronyms == {} assert gterms == { - 'otherkey': { - 'description': 'the description of other', - 'name': 'other name'}, - 'thekey': { - 'description': 'the description', - 'name': 'name', - 'type': 'symbols'} + "otherkey": {"description": "the description of other", "name": "other name"}, + "thekey": {"description": "the description", "name": "name", "type": "symbols"}, } @pytest.mark.skipif( sys.version_info < (3, 0), - reason="SyntaxError on import of texsoup/data.py line 135") + reason="SyntaxError on import of texsoup/data.py line 135", +) def test_parse_mixed(): text_str = """ 
\\newacronym{otherkey}{OTHER}{Abbreviation of other} @@ -93,21 +90,17 @@ def test_parse_mixed(): """ gterms, acronyms = parse_tex(text_str=text_str) assert acronyms == { - 'otherkey': { - 'abbreviation': 'OTHER', - 'longname': 'Abbreviation of other'} + "otherkey": {"abbreviation": "OTHER", "longname": "Abbreviation of other"} } assert gterms == { - 'thekey': { - 'description': 'the description', - 'name': 'name', - 'type': 'symbols'} + "thekey": {"description": "the description", "name": "name", "type": "symbols"} } @pytest.mark.skipif( sys.version_info < (3, 0), - reason="SyntaxError on import of texsoup/data.py line 135") + reason="SyntaxError on import of texsoup/data.py line 135", +) def test_duplicate_key(): text_str = """ \\newacronym{thekey}{OTHER}{Abbreviation of other} @@ -123,7 +116,8 @@ def test_duplicate_key(): @pytest.mark.skipif( sys.version_info < (3, 0), - reason="SyntaxError on import of texsoup/data.py line 135") + reason="SyntaxError on import of texsoup/data.py line 135", +) def test_acronym_ioerror(): text_str = """ \\newacronym{thekey}{Abbreviation of other} @@ -134,7 +128,8 @@ def test_acronym_ioerror(): @pytest.mark.skipif( sys.version_info < (3, 0), - reason="SyntaxError on import of texsoup/data.py line 135") + reason="SyntaxError on import of texsoup/data.py line 135", +) def test_gterm_ioerror(): text_str = """ \\newglossaryentry{} @@ -145,7 +140,8 @@ def test_gterm_ioerror(): @pytest.mark.skipif( sys.version_info < (3, 0), - reason="SyntaxError on import of texsoup/data.py line 135") + reason="SyntaxError on import of texsoup/data.py line 135", +) def test_ioerror_skip(): text_str = """ \\newacronym{thekey}{Abbreviation of other} @@ -154,8 +150,5 @@ def test_ioerror_skip(): gterms, acronyms = parse_tex(text_str=text_str, skip_ioerrors=True) assert gterms == {} assert acronyms == { - "thekey2": { - 'abbreviation': 'ABBR', - 'longname': 'Abbreviation of other' - } + "thekey2": {"abbreviation": "ABBR", "longname": "Abbreviation of other"} } diff --git a/ipypublish/convert/config_manager.py b/ipypublish/convert/config_manager.py index 13309bb..397a0b7 100644 --- a/ipypublish/convert/config_manager.py +++ b/ipypublish/convert/config_manager.py @@ -8,13 +8,18 @@ import jsonschema import nbconvert # noqa: F401 -from ipypublish.utils import (pathlib, handle_error, get_module_path, - read_file_from_directory, read_file_from_module) +from ipypublish.utils import ( + pathlib, + handle_error, + get_module_path, + read_file_from_directory, + read_file_from_module, +) from ipypublish import export_plugins from ipypublish import schema from ipypublish.templates.create_template import create_template -_TEMPLATE_KEY = 'new_template' +_TEMPLATE_KEY = "new_template" _EXPORT_SCHEMA_FILE = "export_config.schema.json" _EXPORT_SCHEMA = None @@ -52,43 +57,56 @@ def load_export_config(export_config_path): export_config_path = pathlib.Path(export_config_path) data = read_file_from_directory( - export_config_path.parent, export_config_path.name, - "export configuration", logger, interp_ext=True) + export_config_path.parent, + export_config_path.name, + "export configuration", + logger, + interp_ext=True, + ) # validate against schema global _EXPORT_SCHEMA if _EXPORT_SCHEMA is None: # lazy load schema once _EXPORT_SCHEMA = read_file_from_directory( - get_module_path(schema), _EXPORT_SCHEMA_FILE, - "export configuration schema", logger, interp_ext=True) + get_module_path(schema), + _EXPORT_SCHEMA_FILE, + "export configuration schema", + logger, + interp_ext=True, + ) try: 
jsonschema.validate(data, _EXPORT_SCHEMA) except jsonschema.ValidationError as err: handle_error( "validation of export config {} failed against {}: {}".format( - export_config_path, _EXPORT_SCHEMA_FILE, err.message), - jsonschema.ValidationError, logger=logger) + export_config_path, _EXPORT_SCHEMA_FILE, err.message + ), + jsonschema.ValidationError, + logger=logger, + ) return data -def iter_all_export_infos(config_folder_paths=(), - regex="*.json", get_mime=False): +def iter_all_export_infos(config_folder_paths=(), regex="*.json", get_mime=False): """iterate through all export configuration and yield a dict of info""" for name, path in iter_all_export_paths(config_folder_paths, regex): data = load_export_config(path) - info = dict([ - ("key", str(name)), - ("class", data["exporter"]["class"]), - ("path", str(path)), - ("description", data["description"]) - ]) + info = dict( + [ + ("key", str(name)), + ("class", data["exporter"]["class"]), + ("path", str(path)), + ("description", data["description"]), + ] + ) if get_mime: info["mime_type"] = create_exporter_cls( - data["exporter"]["class"]).output_mimetype + data["exporter"]["class"] + ).output_mimetype yield info @@ -104,14 +122,19 @@ def create_exporter_cls(class_str): except ModuleNotFoundError: # noqa: F821 handle_error( "module {} containing exporter class {} not found".format( - module_path, class_name), - ModuleNotFoundError, logger=logger) # noqa: F821 + module_path, class_name + ), + ModuleNotFoundError, + logger=logger, + ) # noqa: F821 if hasattr(export_module, class_name): export_class = getattr(export_module, class_name) else: handle_error( - "module {} does not contain class {}".format( - module_path, class_name), ImportError, logger=logger) + "module {} does not contain class {}".format(module_path, class_name), + ImportError, + logger=logger, + ) return export_class @@ -136,37 +159,53 @@ def load_template(template_key, template_dict): outline_template = read_file_from_directory( template_dict["outline"]["directory"], template_dict["outline"]["file"], - "template outline", logger, interp_ext=False) - outline_name = os.path.join(template_dict["outline"]["directory"], - template_dict["outline"]["file"]) + "template outline", + logger, + interp_ext=False, + ) + outline_name = os.path.join( + template_dict["outline"]["directory"], template_dict["outline"]["file"] + ) else: outline_template = read_file_from_module( template_dict["outline"]["module"], template_dict["outline"]["file"], - "template outline", logger, interp_ext=False) - outline_name = os.path.join(template_dict["outline"]["module"], - template_dict["outline"]["file"]) + "template outline", + logger, + interp_ext=False, + ) + outline_name = os.path.join( + template_dict["outline"]["module"], template_dict["outline"]["file"] + ) segments = [] for snum, segment in enumerate(template_dict.get("segments", [])): if "file" not in segment: - handle_error( - "'file' expected in segment {}".format(snum), - KeyError, logger) + handle_error("'file' expected in segment {}".format(snum), KeyError, logger) if "directory" in segment: seg_data = read_file_from_directory( segment["directory"], - segment["file"], "template segment", logger, interp_ext=True) + segment["file"], + "template segment", + logger, + interp_ext=True, + ) elif "module" in segment: seg_data = read_file_from_module( segment["module"], - segment["file"], "template segment", logger, interp_ext=True) + segment["file"], + "template segment", + logger, + interp_ext=True, + ) else: handle_error( "'directory' or 
'module' expected in segment {}".format(snum), - KeyError, logger) + KeyError, + logger, + ) segments.append(seg_data) diff --git a/ipypublish/convert/main.py b/ipypublish/convert/main.py index 5fb9362..3f062b4 100755 --- a/ipypublish/convert/main.py +++ b/ipypublish/convert/main.py @@ -9,6 +9,7 @@ import traitlets as T from traitlets import default, validate, TraitError + # from traitlets import validate from traitlets.config.configurable import Configurable from traitlets.config import Config @@ -17,69 +18,107 @@ import jsonschema import ipypublish -from ipypublish.utils import (pathlib, handle_error, read_file_from_directory, get_module_path, get_valid_filename, - find_entry_point) +from ipypublish.utils import ( + pathlib, + handle_error, + read_file_from_directory, + get_module_path, + get_valid_filename, + find_entry_point, +) from ipypublish import schema from ipypublish.convert.nbmerge import merge_notebooks -from ipypublish.convert.config_manager import (get_export_config_path, load_export_config, load_template, - create_exporter_cls) +from ipypublish.convert.config_manager import ( + get_export_config_path, + load_export_config, + load_template, + create_exporter_cls, +) def dict_to_config(config, unflatten=True, key_as_tuple=False): if unflatten: - config = edict.unflatten(config, key_as_tuple=key_as_tuple, delim='.') + config = edict.unflatten(config, key_as_tuple=key_as_tuple, delim=".") return Config(config) class IpyPubMain(Configurable): - conversion = T.Unicode('latex_ipypublish_main', help='key or path to conversion configuration').tag(config=True) + conversion = T.Unicode( + "latex_ipypublish_main", help="key or path to conversion configuration" + ).tag(config=True) - plugin_folder_paths = T.Set(T.Unicode(), - default_value=(), - help='a list of folders containing conversion configurations').tag(config=True) + plugin_folder_paths = T.Set( + T.Unicode(), + default_value=(), + help="a list of folders containing conversion configurations", + ).tag(config=True) - @validate('plugin_folder_paths') + @validate("plugin_folder_paths") def _validate_plugin_folder_paths(self, proposal): - folder_paths = proposal['value'] + folder_paths = proposal["value"] for path in folder_paths: if not os.path.exists(path): - raise TraitError('the configuration folder path does not exist: ' '{}'.format(path)) - return proposal['value'] - - outpath = T.Union([T.Unicode(), T.Instance(pathlib.Path)], - allow_none=True, - default_value=None, - help='path to output converted files').tag(config=True) - - folder_suffix = T.Unicode('_files', - help=('suffix for the folder name where content will be dumped ' - '(e.g. internal images). 
'
-                                    'It will be a sanitized version of the input filename, '
-                                    'followed by the suffix')).tag(config=True)
-
-    ignore_prefix = T.Unicode('_', help=('prefixes to ignore, ' 'when finding notebooks to merge')).tag(config=True)
-
-    meta_path_placeholder = T.Unicode('${meta_path}',
-                                      help=('all string values in the export configuration containing '
-                                            'this placeholder will be be replaced with the path to the '
-                                            'notebook from which the metadata was obtained')).tag(config=True)
+            raise TraitError(
+                "the configuration folder path does not exist: " "{}".format(path)
+            )
+        return proposal["value"]
+
+    outpath = T.Union(
+        [T.Unicode(), T.Instance(pathlib.Path)],
+        allow_none=True,
+        default_value=None,
+        help="path to output converted files",
+    ).tag(config=True)
+
+    folder_suffix = T.Unicode(
+        "_files",
+        help=(
+            "suffix for the folder name where content will be dumped "
+            "(e.g. internal images). "
+            "It will be a sanitized version of the input filename, "
+            "followed by the suffix"
+        ),
+    ).tag(config=True)
+
+    ignore_prefix = T.Unicode(
+        "_", help=("prefixes to ignore, " "when finding notebooks to merge")
+    ).tag(config=True)
+
+    meta_path_placeholder = T.Unicode(
+        "${meta_path}",
+        help=(
+            "all string values in the export configuration containing "
+            "this placeholder will be replaced with the path to the "
+            "notebook from which the metadata was obtained"
+        ),
+    ).tag(config=True)
 
     files_folder_placeholder = T.Unicode(
-        '${files_path}',
-        help=('all string values in the export configuration containing '
-              'this placeholder will be be replaced with the path '
-              '(relative to outpath) to the folder where files will be dumped')).tag(config=True)
-
-    validate_nb_metadata = T.Bool(True,
-                                  help=('before running the exporter, validate that '
-                                        'the notebook level metadata is valid again the schema')).tag(config=True)
-
-    pre_conversion_funcs = T.Dict(help=('a mapping of file extensions to functions that can convert'
-                                        'that file type Instance(nbformat.NotebookNode) = func(pathstr)')).tag(
-        config=True)
-
-    @default('pre_conversion_funcs')
+        "${files_path}",
+        help=(
+            "all string values in the export configuration containing "
+            "this placeholder will be replaced with the path "
+            "(relative to outpath) to the folder where files will be dumped"
+        ),
+    ).tag(config=True)
+
+    validate_nb_metadata = T.Bool(
+        True,
+        help=(
+            "before running the exporter, validate that "
+            "the notebook level metadata is valid against the schema"
+        ),
+    ).tag(config=True)
+
+    pre_conversion_funcs = T.Dict(
+        help=(
+            "a mapping of file extensions to functions that can convert "
+            "that file type Instance(nbformat.NotebookNode) = func(pathstr)"
+        )
+    ).tag(config=True)
+
+    @default("pre_conversion_funcs")
     def _default_pre_conversion_funcs(self):
         try:
             import jupytext  # noqa: F401
@@ -92,117 +131,154 @@ def _default_pre_conversion_funcs(self):
             # this is deprecated in newer versions
             from jupytext import readf as read  # noqa: F401
 
-        return {'.Rmd': read}
+        return {".Rmd": read}
 
-    @validate('pre_conversion_funcs')
+    @validate("pre_conversion_funcs")
     def _validate_pre_conversion_funcs(self, proposal):
-        for ext, func in proposal['value'].items():
-            if not ext.startswith('.'):
-                raise TraitError("the extension key should start with a '.': " '{}'.format(ext))
+        for ext, func in proposal["value"].items():
+            if not ext.startswith("."):
+                raise TraitError(
+                    "the extension key should start with a '.': " "{}".format(ext)
+                )
             try:
-                func('string')
+                func("string")
                 # TODO should do this safely with inspect,
                 # but no 
obvious solution # to check if it only requires one string argument except TypeError: - raise TraitError('the function for {} can not be ' - 'called with a single string arg: ' - '{}'.format(ext, func)) + raise TraitError( + "the function for {} can not be " + "called with a single string arg: " + "{}".format(ext, func) + ) except Exception: pass - return proposal['value'] - - log_to_stdout = T.Bool(True, help='whether to log to sys.stdout').tag(config=True) - - log_level_stdout = T.Enum(['debug', 'info', 'warning', 'error', 'DEBUG', 'INFO', 'WARNING', 'ERROR'], - default_value='INFO', - help='the logging level to output to stdout').tag(config=True) - - log_stdout_formatstr = T.Unicode('%(levelname)s:%(name)s:%(message)s').tag(config=True) - - log_to_file = T.Bool(False, help='whether to log to file').tag(config=True) - - log_level_file = T.Enum(['debug', 'info', 'warning', 'error', 'DEBUG', 'INFO', 'WARNING', 'ERROR'], - default_value='INFO', - help='the logging level to output to file').tag(config=True) - - log_file_path = T.Unicode(None, allow_none=True, - help='if None, will output to {outdir}/{ipynb_name}.nbpub.log').tag(config=True) - - log_file_formatstr = T.Unicode('%(levelname)s:%(name)s:%(message)s').tag(config=True) - - default_ppconfig_kwargs = T.Dict(trait=T.Bool(), - default_value=(('pdf_in_temp', False), ('pdf_debug', False), ('launch_browser', - False)), - help=('convenience arguments for constructing the post-processors ' - 'default configuration')).tag(config=True) - - default_pporder_kwargs = T.Dict(trait=T.Bool(), - default_value=(('dry_run', False), ('clear_existing', False), ('dump_files', False), - ('create_pdf', False), ('serve_html', False), ('slides', False)), - help=('convenience arguments for constructing the post-processors ' - 'default list')).tag(config=True) + return proposal["value"] + + log_to_stdout = T.Bool(True, help="whether to log to sys.stdout").tag(config=True) + + log_level_stdout = T.Enum( + ["debug", "info", "warning", "error", "DEBUG", "INFO", "WARNING", "ERROR"], + default_value="INFO", + help="the logging level to output to stdout", + ).tag(config=True) + + log_stdout_formatstr = T.Unicode("%(levelname)s:%(name)s:%(message)s").tag( + config=True + ) + + log_to_file = T.Bool(False, help="whether to log to file").tag(config=True) + + log_level_file = T.Enum( + ["debug", "info", "warning", "error", "DEBUG", "INFO", "WARNING", "ERROR"], + default_value="INFO", + help="the logging level to output to file", + ).tag(config=True) + + log_file_path = T.Unicode( + None, + allow_none=True, + help="if None, will output to {outdir}/{ipynb_name}.nbpub.log", + ).tag(config=True) + + log_file_formatstr = T.Unicode("%(levelname)s:%(name)s:%(message)s").tag( + config=True + ) + + default_ppconfig_kwargs = T.Dict( + trait=T.Bool(), + default_value=( + ("pdf_in_temp", False), + ("pdf_debug", False), + ("launch_browser", False), + ), + help=( + "convenience arguments for constructing the post-processors " + "default configuration" + ), + ).tag(config=True) + + default_pporder_kwargs = T.Dict( + trait=T.Bool(), + default_value=( + ("dry_run", False), + ("clear_existing", False), + ("dump_files", False), + ("create_pdf", False), + ("serve_html", False), + ("slides", False), + ), + help=( + "convenience arguments for constructing the post-processors " "default list" + ), + ).tag(config=True) # TODO validate that default_ppconfig/pporder_kwargs can be parsed to funcs - default_exporter_config = T.Dict(help='default configuration for exporters').tag(config=True) + 
default_exporter_config = T.Dict(help="default configuration for exporters").tag( + config=True + ) - @default('default_exporter_config') + @default("default_exporter_config") def _default_exporter_config(self): - temp = '${files_path}/{unique_key}_{cell_index}_{index}{extension}' - return {'ExtractOutputPreprocessor': {'output_filename_template': temp}} + temp = "${files_path}/{unique_key}_{cell_index}_{index}{extension}" + return {"ExtractOutputPreprocessor": {"output_filename_template": temp}} - def _create_default_ppconfig(self, pdf_in_temp=False, pdf_debug=False, launch_browser=False): + def _create_default_ppconfig( + self, pdf_in_temp=False, pdf_debug=False, launch_browser=False + ): """create a default config for postprocessors""" - return Config({ - 'PDFExport': { - 'files_folder': '${files_path}', - 'convert_in_temp': pdf_in_temp, - 'debug_mode': pdf_debug, - 'open_in_browser': launch_browser, - 'skip_mime': False - }, - 'RunSphinx': { - 'open_in_browser': launch_browser, - }, - 'RemoveFolder': { - 'files_folder': '${files_path}' - }, - 'CopyResourcePaths': { - 'files_folder': '${files_path}' - }, - 'ConvertBibGloss': { - 'files_folder': '${files_path}' + return Config( + { + "PDFExport": { + "files_folder": "${files_path}", + "convert_in_temp": pdf_in_temp, + "debug_mode": pdf_debug, + "open_in_browser": launch_browser, + "skip_mime": False, + }, + "RunSphinx": {"open_in_browser": launch_browser}, + "RemoveFolder": {"files_folder": "${files_path}"}, + "CopyResourcePaths": {"files_folder": "${files_path}"}, + "ConvertBibGloss": {"files_folder": "${files_path}"}, } - }) - - def _create_default_pporder(self, - dry_run=False, - clear_existing=False, - dump_files=False, - create_pdf=False, - serve_html=False, - slides=False): + ) + + def _create_default_pporder( + self, + dry_run=False, + clear_existing=False, + dump_files=False, + create_pdf=False, + serve_html=False, + slides=False, + ): """create a default list of postprocessors to run""" - default_pprocs = ['remove-blank-lines', 'remove-trailing-space', 'filter-output-files'] + default_pprocs = [ + "remove-blank-lines", + "remove-trailing-space", + "filter-output-files", + ] if slides: - default_pprocs.append('fix-slide-refs') + default_pprocs.append("fix-slide-refs") if not dry_run: if clear_existing: - default_pprocs.append('remove-folder') - default_pprocs.append('write-text-file') + default_pprocs.append("remove-folder") + default_pprocs.append("write-text-file") if dump_files or create_pdf or serve_html: - default_pprocs.extend(['write-resource-files', 'copy-resource-paths', 'convert-bibgloss']) + default_pprocs.extend( + ["write-resource-files", "copy-resource-paths", "convert-bibgloss"] + ) if create_pdf: - default_pprocs.append('pdf-export') + default_pprocs.append("pdf-export") elif serve_html: - default_pprocs.append('reveal-server') + default_pprocs.append("reveal-server") return default_pprocs @property def logger(self): - return logging.getLogger('ipypublish') + return logging.getLogger("ipypublish") def _setup_logger(self, ipynb_name, outdir): @@ -227,12 +303,12 @@ def _setup_logger(self, ipynb_name, outdir): if self.log_file_path: path = self.log_file_path else: - path = os.path.join(outdir, ipynb_name + '.nbpub.log') + path = os.path.join(outdir, ipynb_name + ".nbpub.log") if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) - flogger = logging.FileHandler(path, 'w') + flogger = logging.FileHandler(path, "w") flogger.setLevel(getattr(logging, self.log_level_file.upper())) formatter = 
logging.Formatter(self.log_file_formatstr) flogger.setFormatter(formatter) @@ -290,26 +366,46 @@ def publish(self, ipynb_path, nb_node=None): if isinstance(ipynb_path, string_types): ipynb_path = pathlib.Path(ipynb_path) ipynb_name, ipynb_ext = os.path.splitext(ipynb_path.name) - outdir = (os.path.join(os.getcwd(), 'converted') if self.outpath is None else str(self.outpath)) + outdir = ( + os.path.join(os.getcwd(), "converted") + if self.outpath is None + else str(self.outpath) + ) self._setup_logger(ipynb_name, outdir) if not ipynb_path.exists() and not nb_node: - handle_error('the notebook path does not exist: {}'.format(ipynb_path), IOError, self.logger) + handle_error( + "the notebook path does not exist: {}".format(ipynb_path), + IOError, + self.logger, + ) # log start of conversion - self.logger.info('started ipypublish v{0} at {1}'.format(ipypublish.__version__, time.strftime('%c'))) - self.logger.info('logging to: {}'.format(os.path.join(outdir, ipynb_name + '.nbpub.log'))) - self.logger.info('running for ipynb(s) at: {0}'.format(ipynb_path)) - self.logger.info('with conversion configuration: {0}'.format(self.conversion)) + self.logger.info( + "started ipypublish v{0} at {1}".format( + ipypublish.__version__, time.strftime("%c") + ) + ) + self.logger.info( + "logging to: {}".format(os.path.join(outdir, ipynb_name + ".nbpub.log")) + ) + self.logger.info("running for ipynb(s) at: {0}".format(ipynb_path)) + self.logger.info("with conversion configuration: {0}".format(self.conversion)) if nb_node is None and ipynb_ext in self.pre_conversion_funcs: func = self.pre_conversion_funcs[ipynb_ext] - self.logger.info('running pre-conversion with: {}'.format(inspect.getmodule(func))) + self.logger.info( + "running pre-conversion with: {}".format(inspect.getmodule(func)) + ) try: nb_node = func(ipynb_path) except Exception as err: - handle_error('pre-conversion failed for {}: {}'.format(ipynb_path, err), err, self.logger) + handle_error( + "pre-conversion failed for {}: {}".format(ipynb_path, err), + err, + self.logger, + ) # doesn't work with folders # if (ipynb_ext != ".ipynb" and nb_node is None): @@ -324,66 +420,89 @@ def publish(self, ipynb_path, nb_node=None): # (would require creating a main.tex with the preamble in etc ) # Could make everything a 'PyProcess', # with support for multiple streams - final_nb, meta_path = merge_notebooks(ipynb_path, ignore_prefix=self.ignore_prefix) + final_nb, meta_path = merge_notebooks( + ipynb_path, ignore_prefix=self.ignore_prefix + ) else: final_nb, meta_path = (nb_node, ipynb_path) # valdate the notebook metadata against the schema if self.validate_nb_metadata: - nb_metadata_schema = read_file_from_directory(get_module_path(schema), - 'doc_metadata.schema.json', - 'doc_metadata.schema', - self.logger, - interp_ext=True) + nb_metadata_schema = read_file_from_directory( + get_module_path(schema), + "doc_metadata.schema.json", + "doc_metadata.schema", + self.logger, + interp_ext=True, + ) try: jsonschema.validate(final_nb.metadata, nb_metadata_schema) except jsonschema.ValidationError as err: - handle_error('validation of notebook level metadata failed: {}\n' - 'see the doc_metadata.schema.json for full spec'.format(err.message), - jsonschema.ValidationError, - logger=self.logger) + handle_error( + "validation of notebook level metadata failed: {}\n" + "see the doc_metadata.schema.json for full spec".format( + err.message + ), + jsonschema.ValidationError, + logger=self.logger, + ) # set text replacements for export configuration replacements = { 
self.meta_path_placeholder: str(meta_path), - self.files_folder_placeholder: '{}{}'.format(get_valid_filename(ipynb_name), self.folder_suffix) + self.files_folder_placeholder: "{}{}".format( + get_valid_filename(ipynb_name), self.folder_suffix + ), } - self.logger.debug('notebooks meta path: {}'.format(meta_path)) + self.logger.debug("notebooks meta path: {}".format(meta_path)) # load configuration file - (exporter_cls, jinja_template, econfig, pprocs, pconfig) = self._load_config_file(replacements) + ( + exporter_cls, + jinja_template, + econfig, + pprocs, + pconfig, + ) = self._load_config_file(replacements) # run nbconvert - self.logger.info('running nbconvert') - exporter, stream, resources = self.export_notebook(final_nb, exporter_cls, econfig, jinja_template) + self.logger.info("running nbconvert") + exporter, stream, resources = self.export_notebook( + final_nb, exporter_cls, econfig, jinja_template + ) # record if the notebook contains widgets (for use by sphinx) - if 'application/vnd.jupyter.widget-state+json' in final_nb.metadata.get('widgets', {}): - resources['contains_ipywidgets'] = True + if "application/vnd.jupyter.widget-state+json" in final_nb.metadata.get( + "widgets", {} + ): + resources["contains_ipywidgets"] = True # postprocess results main_filepath = os.path.join(outdir, ipynb_name + exporter.file_extension) for post_proc_name in pprocs: - proc_class = find_entry_point(post_proc_name, 'ipypublish.postprocessors', self.logger, 'ipypublish') + proc_class = find_entry_point( + post_proc_name, "ipypublish.postprocessors", self.logger, "ipypublish" + ) proc = proc_class(pconfig) - stream, main_filepath, resources = proc.postprocess(stream, exporter.output_mimetype, main_filepath, - resources) + stream, main_filepath, resources = proc.postprocess( + stream, exporter.output_mimetype, main_filepath, resources + ) - self.logger.info('process finished successfully') + self.logger.info("process finished successfully") return { - 'outpath': outdir, - 'exporter': exporter, - 'stream': stream, - 'main_filepath': main_filepath, - 'resources': resources + "outpath": outdir, + "exporter": exporter, + "stream": stream, + "main_filepath": main_filepath, + "resources": resources, } def _load_config_file(self, replacements): # find conversion configuration - self.logger.info('finding conversion configuration: {}'.format(self.conversion)) + self.logger.info("finding conversion configuration: {}".format(self.conversion)) export_config_path = None if isinstance(self.conversion, string_types): outformat_path = pathlib.Path(self.conversion) @@ -394,43 +513,53 @@ def _load_config_file(self, replacements): export_config_path = outformat_path else: # else search internally - export_config_path = get_export_config_path(self.conversion, self.plugin_folder_paths) + export_config_path = get_export_config_path( + self.conversion, self.plugin_folder_paths + ) if export_config_path is None: - handle_error('could not find conversion configuration: {}'.format(self.conversion), IOError, self.logger) + handle_error( + "could not find conversion configuration: {}".format(self.conversion), + IOError, + self.logger, + ) # read conversion configuration and create - self.logger.info('loading conversion configuration') + self.logger.info("loading conversion configuration") data = load_export_config(export_config_path) - self.logger.info('creating exporter') - exporter_cls = create_exporter_cls(data['exporter']['class']) - self.logger.info('creating template and loading filters') - template_name = 'template_file' 
- jinja_template = load_template(template_name, data['template']) - self.logger.info('creating process configuration') - export_config = self._create_export_config(data['exporter'], template_name, replacements) - pprocs, pproc_config = self._create_pproc_config(data.get('postprocessors', {}), replacements) + self.logger.info("creating exporter") + exporter_cls = create_exporter_cls(data["exporter"]["class"]) + self.logger.info("creating template and loading filters") + template_name = "template_file" + jinja_template = load_template(template_name, data["template"]) + self.logger.info("creating process configuration") + export_config = self._create_export_config( + data["exporter"], template_name, replacements + ) + pprocs, pproc_config = self._create_pproc_config( + data.get("postprocessors", {}), replacements + ) return (exporter_cls, jinja_template, export_config, pprocs, pproc_config) def _create_export_config(self, exporter_data, template_name, replacements): # type: (dict, Dict[str, str]) -> Config config = {} - exporter_name = exporter_data['class'].split('.')[-1] + exporter_name = exporter_data["class"].split(".")[-1] - config[exporter_name + '.template_file'] = template_name - config[exporter_name + '.filters'] = exporter_data.get('filters', []) + config[exporter_name + ".template_file"] = template_name + config[exporter_name + ".filters"] = exporter_data.get("filters", []) preprocessors = [] - for preproc in exporter_data.get('preprocessors', []): - preprocessors.append(preproc['class']) - preproc_name = preproc['class'].split('.')[-1] - for name, val in preproc.get('args', {}).items(): - config[preproc_name + '.' + name] = val + for preproc in exporter_data.get("preprocessors", []): + preprocessors.append(preproc["class"]) + preproc_name = preproc["class"].split(".")[-1] + for name, val in preproc.get("args", {}).items(): + config[preproc_name + "." 
+ name] = val
 
-        config[exporter_name + '.preprocessors'] = preprocessors
+        config[exporter_name + ".preprocessors"] = preprocessors
 
-        for name, val in exporter_data.get('other_args', {}).items():
+        for name, val in exporter_data.get("other_args", {}).items():
             config[name] = val
 
         final_config = self.default_exporter_config
@@ -442,15 +571,15 @@ def _create_export_config(self, exporter_data, template_name, replacements):
 
     def _create_pproc_config(self, pproc_data, replacements):
 
-        if 'order' in pproc_data:
-            pprocs_list = pproc_data['order']
+        if "order" in pproc_data:
+            pprocs_list = pproc_data["order"]
         else:
             pprocs_list = self._create_default_pporder(**self.default_pporder_kwargs)
 
         pproc_config = self._create_default_ppconfig(**self.default_ppconfig_kwargs)
 
-        if 'config' in pproc_data:
-            override_config = pproc_data['config']
+        if "config" in pproc_data:
+            override_config = pproc_data["config"]
             pproc_config.update(override_config)
 
         replace_placeholders(pproc_config, replacements)
@@ -459,13 +588,16 @@ def _create_pproc_config(self, pproc_data, replacements):
 
     def export_notebook(self, final_nb, exporter_cls, config, jinja_template):
 
-        kwargs = {'config': config}
+        kwargs = {"config": config}
         if jinja_template is not None:
-            kwargs['extra_loaders'] = [jinja_template]
+            kwargs["extra_loaders"] = [jinja_template]
         try:
             exporter = exporter_cls(**kwargs)
         except TypeError:
-            self.logger.warning('the exporter class can not be parsed ' 'the arguments: {}'.format(list(kwargs.keys())))
+            self.logger.warning(
+                "the exporter class can not be passed "
+                "the arguments: {}".format(list(kwargs.keys()))
+            )
             exporter = exporter_cls()
 
         body, resources = exporter.from_notebook_node(final_nb)
@@ -488,5 +620,5 @@ def replace_placeholders(mapping, replacements):
                 for instr, outstr in replacements.items():
                     val = val.replace(instr, outstr)
                 mapping[key] = val
-            elif hasattr(val, 'items'):
+            elif hasattr(val, "items"):
                 replace_placeholders(val, replacements)
diff --git a/ipypublish/convert/nbmerge.py b/ipypublish/convert/nbmerge.py
index d0d4a93..85541b5 100755
--- a/ipypublish/convert/nbmerge.py
+++ b/ipypublish/convert/nbmerge.py
@@ -29,15 +29,17 @@ def alphanumeric_sort(l):
     l: list[str]
 
     """
-    def convert(text): return int(text) if text.isdigit() else text.lower()
 
-    def alphanum_key(key): return [convert(c)
-                                   for c in re.split('([0-9]+)', key.name)]
+    def convert(text):
+        return int(text) if text.isdigit() else text.lower()
+
+    def alphanum_key(key):
+        return [convert(c) for c in re.split("([0-9]+)", key.name)]
+
     return sorted(l, key=alphanum_key)
 
 
-def merge_notebooks(ipynb_path, ignore_prefix='_',
-                    to_str=False, as_version=4):
+def merge_notebooks(ipynb_path, ignore_prefix="_", to_str=False, as_version=4):
     """ merge one or more ipynb's,
     if more than one, then the meta data is taken from the first
 
@@ -58,23 +60,26 @@ def merge_notebooks(ipynb_path, ignore_prefix='_',
         path to notebook containing meta file
 
     """
-    meta_path = ''
+    meta_path = ""
    if isinstance(ipynb_path, string_types):
         ipynb_path = pathlib.Path(ipynb_path)
     if not ipynb_path.exists():
-        handle_error('the notebook path does not exist: {}'.format(
-            ipynb_path), IOError, logger)
+        handle_error(
+            "the notebook path does not exist: {}".format(ipynb_path), IOError, logger
+        )
 
     final_nb = None
     if ipynb_path.is_dir():
-        logger.info('Merging all notebooks in directory')
-        for ipath in alphanumeric_sort(ipynb_path.glob('*.ipynb')):
+        logger.info("Merging all notebooks in directory")
+        for ipath in alphanumeric_sort(ipynb_path.glob("*.ipynb")):
             if 
os.path.basename(ipath.name).startswith(ignore_prefix): continue - with ipath.open('r', encoding='utf-8') as f: - if (sys.version_info.major == 3 + with ipath.open("r", encoding="utf-8") as f: + if ( + sys.version_info.major == 3 and sys.version_info.minor < 6 - and "win" not in sys.platform): + and "win" not in sys.platform + ): data = f.read() if hasattr(data, "decode"): data = data.decode("utf-8") @@ -87,11 +92,13 @@ def merge_notebooks(ipynb_path, ignore_prefix='_', else: final_nb.cells.extend(nb.cells) else: - logger.info('Reading notebook') - with ipynb_path.open('r', encoding='utf-8') as f: - if (sys.version_info.major == 3 + logger.info("Reading notebook") + with ipynb_path.open("r", encoding="utf-8") as f: + if ( + sys.version_info.major == 3 and sys.version_info.minor < 6 - and "win" not in sys.platform): + and "win" not in sys.platform + ): data = f.read() if hasattr(data, "decode"): data = data.decode("utf-8") @@ -99,18 +106,21 @@ def merge_notebooks(ipynb_path, ignore_prefix='_', else: final_nb = nbformat.read(f, as_version=as_version) meta_path = ipynb_path - if not hasattr(final_nb.metadata, 'name'): - final_nb.metadata.name = '' + if not hasattr(final_nb.metadata, "name"): + final_nb.metadata.name = "" final_nb.metadata.name += "_merged" if to_str: if sys.version_info > (3, 0): return nbformat.writes(final_nb) else: - return nbformat.writes(final_nb).encode('utf-8') + return nbformat.writes(final_nb).encode("utf-8") if final_nb is None: - handle_error('no acceptable notebooks found for path: {}'.format( - ipynb_path.name), IOError, logger) + handle_error( + "no acceptable notebooks found for path: {}".format(ipynb_path.name), + IOError, + logger, + ) return final_nb, meta_path diff --git a/ipypublish/export_plugins/html_ipypublish_all.json b/ipypublish/export_plugins/html_ipypublish_all.json index cd7ea57..649fd69 100644 --- a/ipypublish/export_plugins/html_ipypublish_all.json +++ b/ipypublish/export_plugins/html_ipypublish_all.json @@ -115,4 +115,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/html_ipypublish_main.json b/ipypublish/export_plugins/html_ipypublish_main.json index bf6faa2..dd39c55 100644 --- a/ipypublish/export_plugins/html_ipypublish_main.json +++ b/ipypublish/export_plugins/html_ipypublish_main.json @@ -85,4 +85,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/html_ipypublish_nocode.json b/ipypublish/export_plugins/html_ipypublish_nocode.json index 370a000..fcf8251 100644 --- a/ipypublish/export_plugins/html_ipypublish_nocode.json +++ b/ipypublish/export_plugins/html_ipypublish_nocode.json @@ -114,4 +114,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/html_standard.json b/ipypublish/export_plugins/html_standard.json index a17aab1..647d034 100644 --- a/ipypublish/export_plugins/html_standard.json +++ b/ipypublish/export_plugins/html_standard.json @@ -40,4 +40,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/latex_ipypublish_all.exec.json b/ipypublish/export_plugins/latex_ipypublish_all.exec.json index 0321762..3192a21 100644 --- a/ipypublish/export_plugins/latex_ipypublish_all.exec.json +++ b/ipypublish/export_plugins/latex_ipypublish_all.exec.json @@ -74,7 +74,7 @@ "allow_errors": false, "interrupt_on_timeout": false, "kernel_name": "" - } + } } }, "template": { @@ -117,4 +117,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/latex_ipypublish_all.json 
b/ipypublish/export_plugins/latex_ipypublish_all.json index b1ed41a..2f0ebec 100644 --- a/ipypublish/export_plugins/latex_ipypublish_all.json +++ b/ipypublish/export_plugins/latex_ipypublish_all.json @@ -109,4 +109,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/latex_ipypublish_main.json b/ipypublish/export_plugins/latex_ipypublish_main.json index ba1e698..4271978 100644 --- a/ipypublish/export_plugins/latex_ipypublish_main.json +++ b/ipypublish/export_plugins/latex_ipypublish_main.json @@ -78,4 +78,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/latex_ipypublish_nocode.json b/ipypublish/export_plugins/latex_ipypublish_nocode.json index e9d2a0b..d4bf0bb 100644 --- a/ipypublish/export_plugins/latex_ipypublish_nocode.json +++ b/ipypublish/export_plugins/latex_ipypublish_nocode.json @@ -111,4 +111,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/latex_standard_article.json b/ipypublish/export_plugins/latex_standard_article.json index a8e0feb..0d8aa60 100644 --- a/ipypublish/export_plugins/latex_standard_article.json +++ b/ipypublish/export_plugins/latex_standard_article.json @@ -36,4 +36,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/nb_ipypublish_all.json b/ipypublish/export_plugins/nb_ipypublish_all.json index c259a21..1b552e5 100644 --- a/ipypublish/export_plugins/nb_ipypublish_all.json +++ b/ipypublish/export_plugins/nb_ipypublish_all.json @@ -48,4 +48,4 @@ "other_args": {} }, "template": null -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/nb_ipypublish_nocode.json b/ipypublish/export_plugins/nb_ipypublish_nocode.json index 64e81a5..493a4de 100644 --- a/ipypublish/export_plugins/nb_ipypublish_nocode.json +++ b/ipypublish/export_plugins/nb_ipypublish_nocode.json @@ -48,4 +48,4 @@ "other_args": {} }, "template": null -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/python_with_meta.json b/ipypublish/export_plugins/python_with_meta.json index b01a84b..d2406e8 100644 --- a/ipypublish/export_plugins/python_with_meta.json +++ b/ipypublish/export_plugins/python_with_meta.json @@ -17,4 +17,4 @@ "file": "python_outline.py.j2" } } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/python_with_meta_stream.json b/ipypublish/export_plugins/python_with_meta_stream.json index c00bbb7..4eaea1b 100644 --- a/ipypublish/export_plugins/python_with_meta_stream.json +++ b/ipypublish/export_plugins/python_with_meta_stream.json @@ -29,4 +29,4 @@ } } } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/slides_ipypublish_all.json b/ipypublish/export_plugins/slides_ipypublish_all.json index d8a1808..a3bc5cb 100644 --- a/ipypublish/export_plugins/slides_ipypublish_all.json +++ b/ipypublish/export_plugins/slides_ipypublish_all.json @@ -121,4 +121,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/slides_ipypublish_main.json b/ipypublish/export_plugins/slides_ipypublish_main.json index c3c7c5f..9d17adc 100644 --- a/ipypublish/export_plugins/slides_ipypublish_main.json +++ b/ipypublish/export_plugins/slides_ipypublish_main.json @@ -90,4 +90,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/slides_ipypublish_nocode.json b/ipypublish/export_plugins/slides_ipypublish_nocode.json index 0f432be..2c53768 100644 --- a/ipypublish/export_plugins/slides_ipypublish_nocode.json +++ 
b/ipypublish/export_plugins/slides_ipypublish_nocode.json @@ -121,4 +121,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/slides_mkdown_all.json b/ipypublish/export_plugins/slides_mkdown_all.json index ba4e1c0..a2c81f1 100644 --- a/ipypublish/export_plugins/slides_mkdown_all.json +++ b/ipypublish/export_plugins/slides_mkdown_all.json @@ -101,4 +101,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/slides_mkdown_main.json b/ipypublish/export_plugins/slides_mkdown_main.json index d77a25a..1ad9ff9 100644 --- a/ipypublish/export_plugins/slides_mkdown_main.json +++ b/ipypublish/export_plugins/slides_mkdown_main.json @@ -71,4 +71,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/slides_mkdown_nocode.json b/ipypublish/export_plugins/slides_mkdown_nocode.json index e64d83d..a30730e 100644 --- a/ipypublish/export_plugins/slides_mkdown_nocode.json +++ b/ipypublish/export_plugins/slides_mkdown_nocode.json @@ -101,4 +101,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/slides_standard.json b/ipypublish/export_plugins/slides_standard.json index d31bff0..2103460 100644 --- a/ipypublish/export_plugins/slides_standard.json +++ b/ipypublish/export_plugins/slides_standard.json @@ -36,4 +36,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/sphinx_ipypublish_all.exec.json b/ipypublish/export_plugins/sphinx_ipypublish_all.exec.json index 1d8fcc9..84d7cb9 100644 --- a/ipypublish/export_plugins/sphinx_ipypublish_all.exec.json +++ b/ipypublish/export_plugins/sphinx_ipypublish_all.exec.json @@ -74,7 +74,7 @@ "allow_errors": false, "interrupt_on_timeout": false, "kernel_name": "" - } + } } }, "template": { @@ -112,4 +112,4 @@ } } } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/sphinx_ipypublish_all.ext.json b/ipypublish/export_plugins/sphinx_ipypublish_all.ext.json index 99f67f9..45fa477 100644 --- a/ipypublish/export_plugins/sphinx_ipypublish_all.ext.json +++ b/ipypublish/export_plugins/sphinx_ipypublish_all.ext.json @@ -102,4 +102,4 @@ "write-resource-files" ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/sphinx_ipypublish_all.ext.noexec.json b/ipypublish/export_plugins/sphinx_ipypublish_all.ext.noexec.json index 9909518..60b7cd4 100644 --- a/ipypublish/export_plugins/sphinx_ipypublish_all.ext.noexec.json +++ b/ipypublish/export_plugins/sphinx_ipypublish_all.ext.noexec.json @@ -98,4 +98,4 @@ "write-resource-files" ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/sphinx_ipypublish_all.json b/ipypublish/export_plugins/sphinx_ipypublish_all.json index 4469efc..7b6bf4f 100644 --- a/ipypublish/export_plugins/sphinx_ipypublish_all.json +++ b/ipypublish/export_plugins/sphinx_ipypublish_all.json @@ -80,4 +80,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/sphinx_ipypublish_all.run.json b/ipypublish/export_plugins/sphinx_ipypublish_all.run.json index dc59150..e3374e6 100644 --- a/ipypublish/export_plugins/sphinx_ipypublish_all.run.json +++ b/ipypublish/export_plugins/sphinx_ipypublish_all.run.json @@ -104,4 +104,4 @@ } } } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/sphinx_ipypublish_main.json b/ipypublish/export_plugins/sphinx_ipypublish_main.json index 8b4b402..551a1e6 100644 --- a/ipypublish/export_plugins/sphinx_ipypublish_main.json +++ b/ipypublish/export_plugins/sphinx_ipypublish_main.json @@ -33,7 
+33,7 @@ { "class": "ipypublish.preprocessors.latex_doc_captions.LatexCaptions", "args": {} - } + } ], "other_args": {} }, @@ -49,4 +49,4 @@ } ] } - } \ No newline at end of file + } diff --git a/ipypublish/export_plugins/sphinx_ipypublish_main.run.json b/ipypublish/export_plugins/sphinx_ipypublish_main.run.json index b6f8bbf..b3f6aa3 100644 --- a/ipypublish/export_plugins/sphinx_ipypublish_main.run.json +++ b/ipypublish/export_plugins/sphinx_ipypublish_main.run.json @@ -33,7 +33,7 @@ { "class": "ipypublish.preprocessors.latex_doc_captions.LatexCaptions", "args": {} - } + } ], "other_args": {} }, @@ -72,4 +72,4 @@ } } } - } \ No newline at end of file + } diff --git a/ipypublish/export_plugins/sphinx_ipypublish_nocode.json b/ipypublish/export_plugins/sphinx_ipypublish_nocode.json index 874ed57..9eaabf7 100644 --- a/ipypublish/export_plugins/sphinx_ipypublish_nocode.json +++ b/ipypublish/export_plugins/sphinx_ipypublish_nocode.json @@ -80,4 +80,4 @@ } ] } -} \ No newline at end of file +} diff --git a/ipypublish/export_plugins/sphinx_nbsphinx.json b/ipypublish/export_plugins/sphinx_nbsphinx.json index 9448520..ee68f11 100644 --- a/ipypublish/export_plugins/sphinx_nbsphinx.json +++ b/ipypublish/export_plugins/sphinx_nbsphinx.json @@ -9,4 +9,4 @@ "other_args": {} }, "template": null - } \ No newline at end of file + } diff --git a/ipypublish/export_plugins/sphinx_standard.json b/ipypublish/export_plugins/sphinx_standard.json index 58c4292..413a678 100644 --- a/ipypublish/export_plugins/sphinx_standard.json +++ b/ipypublish/export_plugins/sphinx_standard.json @@ -20,4 +20,4 @@ } ] } - } \ No newline at end of file + } diff --git a/ipypublish/filters/ansi_listings.py b/ipypublish/filters/ansi_listings.py index 0f40d7e..c671d62 100644 --- a/ipypublish/filters/ansi_listings.py +++ b/ipypublish/filters/ansi_listings.py @@ -7,33 +7,31 @@ from nbconvert.filters.latex import escape_latex -__all__ = [ - 'ansi2listings', -] +__all__ = ["ansi2listings"] -_ANSI_RE = re.compile('\x1b\\[(.*?)([@-~])') +_ANSI_RE = re.compile("\x1b\\[(.*?)([@-~])") _ANSI_COLORS = ( - 'ansi-black', - 'ansi-red', - 'ansi-green', - 'ansi-yellow', - 'ansi-blue', - 'ansi-magenta', - 'ansi-cyan', - 'ansi-white', - 'ansi-black-intense', - 'ansi-red-intense', - 'ansi-green-intense', - 'ansi-yellow-intense', - 'ansi-blue-intense', - 'ansi-magenta-intense', - 'ansi-cyan-intense', - 'ansi-white-intense', + "ansi-black", + "ansi-red", + "ansi-green", + "ansi-yellow", + "ansi-blue", + "ansi-magenta", + "ansi-cyan", + "ansi-white", + "ansi-black-intense", + "ansi-red-intense", + "ansi-green-intense", + "ansi-yellow-intense", + "ansi-blue-intense", + "ansi-magenta-intense", + "ansi-cyan-intense", + "ansi-white-intense", ) -def ansi2listings(text, escapechar='%'): +def ansi2listings(text, escapechar="%"): """ Convert ANSI colors to LaTeX colors. 
@@ -65,33 +63,33 @@ def _latexconverter(fg, bg, bold, escapechar): """ if (fg, bg, bold) == (None, None, False): - return '', '' + return "", "" - starttag, endtag = '', '' + starttag, endtag = "", "" if isinstance(fg, int): - starttag += r'\textcolor{' + _ANSI_COLORS[fg] + '}{' - endtag = '}' + endtag + starttag += r"\textcolor{" + _ANSI_COLORS[fg] + "}{" + endtag = "}" + endtag elif fg: # See http://tex.stackexchange.com/a/291102/13684 - starttag += r'\def\tcRGB{\textcolor[RGB]}\expandafter' - starttag += r'\tcRGB\expandafter{\detokenize{%s,%s,%s}}{' % fg - endtag = '}' + endtag + starttag += r"\def\tcRGB{\textcolor[RGB]}\expandafter" + starttag += r"\tcRGB\expandafter{\detokenize{%s,%s,%s}}{" % fg + endtag = "}" + endtag if isinstance(bg, int): - starttag += r'\setlength{\fboxsep}{0pt}\colorbox{' - starttag += _ANSI_COLORS[bg] + '}{' - endtag = r'\strut}' + endtag + starttag += r"\setlength{\fboxsep}{0pt}\colorbox{" + starttag += _ANSI_COLORS[bg] + "}{" + endtag = r"\strut}" + endtag elif bg: - starttag += r'\setlength{\fboxsep}{0pt}' + starttag += r"\setlength{\fboxsep}{0pt}" # See http://tex.stackexchange.com/a/291102/13684 - starttag += r'\def\cbRGB{\colorbox[RGB]}\expandafter' - starttag += r'\cbRGB\expandafter{\detokenize{%s,%s,%s}}{' % bg - endtag = r'\strut}' + endtag + starttag += r"\def\cbRGB{\colorbox[RGB]}\expandafter" + starttag += r"\cbRGB\expandafter{\detokenize{%s,%s,%s}}{" % bg + endtag = r"\strut}" + endtag if bold: - starttag += r'\textbf{' - endtag = '}' + endtag + starttag += r"\textbf{" + endtag = "}" + endtag starttag = escapechar + starttag endtag += escapechar @@ -128,17 +126,16 @@ def _ansi2anything(text, converter, escapechar): while text: m = _ANSI_RE.search(text) if m: - if m.group(2) == 'm': + if m.group(2) == "m": try: - numbers = [int(n) if n else 0 - for n in m.group(1).split(';')] + numbers = [int(n) if n else 0 for n in m.group(1).split(";")] except ValueError: pass # Invalid color specification else: pass # Not a color code - chunk, text = text[:m.start()], text[m.end():] + chunk, text = text[: m.start()], text[m.end() :] else: - chunk, text = text, '' + chunk, text = text, "" if chunk: if bold and fg in range(8): @@ -183,7 +180,7 @@ def _ansi2anything(text, converter, escapechar): bg = n - 100 + 8 else: pass # Unknown codes are ignored - return ''.join(out) + return "".join(out) def _get_extended_color(numbers): diff --git a/ipypublish/filters/filters.py b/ipypublish/filters/filters.py index 924c5be..6166eab 100644 --- a/ipypublish/filters/filters.py +++ b/ipypublish/filters/filters.py @@ -17,8 +17,8 @@ def basename(path, ext=False): def get_empty_lines(text): """Get number of empty lines before and after text.""" - before = len(text) - len(text.lstrip('\n')) - after = len(text) - len(text.strip('\n')) - before + before = len(text) - len(text.lstrip("\n")) + after = len(text) - len(text.strip("\n")) - before return before, after @@ -26,20 +26,20 @@ def wrap_latex(input, max_length=75, **kwargs): if len(input) > max_length: # remove double dollars, as they don't allow word wrap if len(input) > 3: - if input[0:2] == '$$' and input[-2:] == '$$': + if input[0:2] == "$$" and input[-2:] == "$$": input = input[1:-1] # change \left( and \right) to \bigg( and \bigg), as allow word wrap - input = input.replace(r'\left(', r'\big(') - input = input.replace(r'\right)', r'\big)') + input = input.replace(r"\left(", r"\big(") + input = input.replace(r"\right)", r"\big)") return input def remove_dollars(text): """remove dollars from start/end of text""" - while 
text.startswith('$'):
+    while text.startswith("$"):
         text = text[1:]
-    while text.endswith('$'):
+    while text.endswith("$"):
         text = text[0:-1]
     return text
@@ -71,11 +71,9 @@ def wrap_eqn(text, cell_meta, nb_meta, out="latex"):
         environment = None
     if environment == "none":
         environment = None
-    elif environment in ["equation*", "align*", "multline*",
-                         "gather*", "eqnarray*"]:
+    elif environment in ["equation*", "align*", "multline*", "gather*", "eqnarray*"]:
         numbered = False
-    elif environment in ["equation", "align",
-                         "multline", "gather", "eqnarray"]:
+    elif environment in ["equation", "align", "multline", "gather", "eqnarray"]:
         pass
     elif environment == "breqn" and out == "latex":
         if nb_meta.get("ipub", {}).get("enable_breqn", False):
@@ -134,7 +132,7 @@ def get_caption(etype, cell_meta, resources):
 
 def first_para(input, **kwargs):
     r"""get only ttext before a \n (i.e. the fist paragraph)"""
-    return input.split('\n')[0]
+    return input.split("\n")[0]
 
 
 def _write_roman(num):
@@ -157,7 +155,7 @@ def roman_num(num):
         for r in roman.keys():
             x, y = divmod(num, r)
             yield roman[r] * x
-            num -= (r * x)
+            num -= r * x
         if num > 0:
             roman_num(num)
         else:
@@ -180,25 +178,25 @@ def create_key(input, **kwargs):
     """
     input = re.compile(r"\d+").sub(_repl, input)
-    input = input.replace(':', 'c')
-    input = input.replace(';', 'c')
-    input = input.replace('_', 'u')
-    return re.sub('[^a-zA-Z]+', '', str(input)).lower()
+    input = input.replace(":", "c")
+    input = input.replace(";", "c")
+    input = input.replace("_", "u")
+    return re.sub("[^a-zA-Z]+", "", str(input)).lower()
 
 
 def _split_option(item, original):
     opt = item.split("=")
     if len(opt) > 2:
         raise ValueError(
-            "item '{}' from '{}' contains multiple '='".format(
-                item, original))
+            "item '{}' from '{}' contains multiple '='".format(item, original)
+        )
     elif len(opt) == 1:
         return opt[0].strip(), None
     else:
         return [o.strip() for o in opt]
 
 
-def dict_to_kwds(inobject, kwdstr='', overwrite=True):
+def dict_to_kwds(inobject, kwdstr="", overwrite=True):
     """ convert a dictionary to a string of keywords,
     or, if a list, a string of options
 
@@ -230,9 +228,8 @@ def dict_to_kwds(inobject, kwdstr='', overwrite=True):
             ikey, ival = _split_option(item, kwdstr)
             if ikey in optdict:
                 raise ValueError(
-                    "kwdstr '{}' contain multiple references to '{}'".format(
-                        kwdstr, ikey
-                    ))
+                    "kwdstr '{}' contains multiple references to '{}'".format(kwdstr, ikey)
+                )
             optdict[ikey] = ival
 
     if isinstance(inobject, (list, tuple)):
@@ -242,7 +239,9 @@ def dict_to_kwds(inobject, kwdstr='', overwrite=True):
             if not isinstance(item, string_types):
                 raise ValueError(
                     "option '{}' from option list is not a string: {}".format(
-                        item, kwdstr))
+                        item, kwdstr
+                    )
+                )
             okey, oval = _split_option(item, inobject)
             if okey not in optdict or overwrite:
                 optdict[okey] = oval
@@ -268,13 +267,25 @@ def is_equation(text):
     """test if a piece of text is a latex equation, by how it is wrapped"""
 
     text = text.strip()
-    if any([text.startswith('\\begin{{{0}}}'.format(env))
-            and text.endswith('\\end{{{0}}}'.format(env))
-            for env in
-            ['equation', 'split', 'equation*', 'align', 'align*',
-             'multline', 'multline*', 'gather', 'gather*']]):
+    if any(
+        [
+            text.startswith("\\begin{{{0}}}".format(env))
+            and text.endswith("\\end{{{0}}}".format(env))
+            for env in [
+                "equation",
+                "split",
+                "equation*",
+                "align",
+                "align*",
+                "multline",
+                "multline*",
+                "gather",
+                "gather*",
+            ]
+        ]
+    ):
         return True
-    elif text.startswith('$') and text.endswith('$'):
+    elif text.startswith("$") and text.endswith("$"):
         return True
     else:
         return 
False @@ -282,4 +293,4 @@ def is_equation(text): if __name__ == "__main__": - print(dict_to_kwds(['a', 'c'], 'e,b,d=3')) + print(dict_to_kwds(["a", "c"], "e,b,d=3")) diff --git a/ipypublish/filters/meta_to_yaml.py b/ipypublish/filters/meta_to_yaml.py index 693ca2a..b17212e 100644 --- a/ipypublish/filters/meta_to_yaml.py +++ b/ipypublish/filters/meta_to_yaml.py @@ -1,4 +1,3 @@ - import ruamel.yaml as yaml from nbformat.notebooknode import NotebookNode diff --git a/ipypublish/filters/rst_choose_output.py b/ipypublish/filters/rst_choose_output.py index e0af3f2..0eedbf4 100644 --- a/ipypublish/filters/rst_choose_output.py +++ b/ipypublish/filters/rst_choose_output.py @@ -1,55 +1,55 @@ # See nbconvert/exporters/html.py: DISPLAY_DATA_PRIORITY_HTML = ( - 'application/vnd.jupyter.widget-state+json', - 'application/vnd.jupyter.widget-view+json', - 'application/javascript', - 'text/html', - 'text/markdown', - 'image/svg+xml', - 'text/latex', - 'image/png', - 'image/jpeg', - 'text/plain', + "application/vnd.jupyter.widget-state+json", + "application/vnd.jupyter.widget-view+json", + "application/javascript", + "text/html", + "text/markdown", + "image/svg+xml", + "text/latex", + "image/png", + "image/jpeg", + "text/plain", ) # See nbconvert/exporters/latex.py: DISPLAY_DATA_PRIORITY_LATEX = ( - 'text/latex', - 'image/svg+xml', # this requires sphinx.ext.imgconverter - 'application/pdf', - 'image/png', - 'image/jpeg', - 'text/markdown', - 'text/plain', + "text/latex", + "image/svg+xml", # this requires sphinx.ext.imgconverter + "application/pdf", + "image/png", + "image/jpeg", + "text/markdown", + "text/plain", ) def choose_output_type(output, metadata): """Choose appropriate output data types for HTML and LaTeX.""" - if output.output_type == 'stream': - html_datatype = latex_datatype = 'ansi' + if output.output_type == "stream": + html_datatype = latex_datatype = "ansi" text = output.text - output.data = {'ansi': text[:-1] if text.endswith('\n') else text} - elif output.output_type == 'error': - html_datatype = latex_datatype = 'ansi' - output.data = {'ansi': '\n'.join(output.traceback)} + output.data = {"ansi": text[:-1] if text.endswith("\n") else text} + elif output.output_type == "error": + html_datatype = latex_datatype = "ansi" + output.data = {"ansi": "\n".join(output.traceback)} else: for datatype in DISPLAY_DATA_PRIORITY_HTML: if datatype in output.data: html_datatype = datatype break else: - html_datatype = ', '.join(output.data.keys()) + html_datatype = ", ".join(output.data.keys()) for datatype in DISPLAY_DATA_PRIORITY_LATEX: if datatype in output.data: latex_datatype = datatype break else: - latex_datatype = ', '.join(output.data.keys()) + latex_datatype = ", ".join(output.data.keys()) if html_datatype in [ - 'application/vnd.jupyter.widget-state+json', - 'application/vnd.jupyter.widget-view+json', - 'application/javascript', + "application/vnd.jupyter.widget-state+json", + "application/vnd.jupyter.widget-view+json", + "application/javascript", ]: return html_datatype, latex_datatype diff --git a/ipypublish/filters_pandoc/.gitignore b/ipypublish/filters_pandoc/.gitignore index 8b77f12..a012ec2 100644 --- a/ipypublish/filters_pandoc/.gitignore +++ b/ipypublish/filters_pandoc/.gitignore @@ -1 +1 @@ -archive/ \ No newline at end of file +archive/ diff --git a/ipypublish/filters_pandoc/__init__.py b/ipypublish/filters_pandoc/__init__.py index 02798c6..4463a7f 100644 --- a/ipypublish/filters_pandoc/__init__.py +++ b/ipypublish/filters_pandoc/__init__.py @@ -4,6 +4,7 @@ # import sys from six 
import string_types import panflute as pf + pf.elements.RAW_FORMATS.add("latex") pf.elements.RAW_FORMATS.add("tex") pf.elements.RAW_FORMATS.add("rst") @@ -28,9 +29,18 @@ def builtin2meta(val): return pf.MetaBlocks(val) elif isinstance(val, pf.Inline): return pf.MetaInlines(val) - elif isinstance(val, (pf.MetaBool, pf.MetaString, pf.MetaValue, - pf.MetaList, pf.MetaMap, pf.MetaBlocks, - pf.MetaInlines)): + elif isinstance( + val, + ( + pf.MetaBool, + pf.MetaString, + pf.MetaValue, + pf.MetaList, + pf.MetaMap, + pf.MetaBlocks, + pf.MetaInlines, + ), + ): return val raise TypeError("unknown type: {} (type: {})".format(val, type(val))) diff --git a/ipypublish/filters_pandoc/definitions.py b/ipypublish/filters_pandoc/definitions.py index fbfa8f5..758b39b 100644 --- a/ipypublish/filters_pandoc/definitions.py +++ b/ipypublish/filters_pandoc/definitions.py @@ -1,4 +1,3 @@ - IPUB_META_ROUTE = "ipub.pandoc" ATTRIBUTE_CITE_CLASS = "attribute-Cite" @@ -10,61 +9,60 @@ # NB: it appears '-' and '^' are already used by sphinx PREFIX_MAP = ( - ("", ( - ("classes", ()), - ("attributes", (("latex", "cite"), ("rst", "cite"))) - )), - ("+", ( - ("classes", ()), - ("attributes", (("latex", "cref"), ("rst", "numref"))) - )), - ("!", ( - ("classes", ()), - ("attributes", (("latex", "ref"), ("rst", "ref"))) - )), - ("=", ( - ("classes", ()), - ("attributes", (("latex", "eqref"), ("rst", "eq"))) - )), - ("?", ( - ("classes", ("capital",)), - ("attributes", (("latex", "Cref"), ("rst", "numref"))) - )), - ("&", ( - ("classes", ()), - ("attributes", (("latex", "gls"), ("rst", "gls"))), - )), - ("%", ( - ("classes", ("capital",)), - ("attributes", (("latex", "Gls"), ("rst", "glsc"))) - )), + ("", (("classes", ()), ("attributes", (("latex", "cite"), ("rst", "cite"))))), + ("+", (("classes", ()), ("attributes", (("latex", "cref"), ("rst", "numref"))))), + ("!", (("classes", ()), ("attributes", (("latex", "ref"), ("rst", "ref"))))), + ("=", (("classes", ()), ("attributes", (("latex", "eqref"), ("rst", "eq"))))), + ( + "?", + ( + ("classes", ("capital",)), + ("attributes", (("latex", "Cref"), ("rst", "numref"))), + ), + ), + ("&", (("classes", ()), ("attributes", (("latex", "gls"), ("rst", "gls"))))), + ( + "%", + ( + ("classes", ("capital",)), + ("attributes", (("latex", "Gls"), ("rst", "glsc"))), + ), + ), ) PREFIX_MAP_LATEX_R = ( - ('cref', '+'), - ('Cref', '?'), - ('ref', '!'), - ('eqref', '='), + ("cref", "+"), + ("Cref", "?"), + ("ref", "!"), + ("eqref", "="), ("cite", ""), ("gls", "&"), - ("Gls", "%") - ) + ("Gls", "%"), +) PREFIX_MAP_RST_R = ( - ('numref', '+'), - ('ref', '!'), - ('eq', '='), + ("numref", "+"), + ("ref", "!"), + ("eq", "="), ("cite", ""), ("gls", "&"), - ("glsc", "%") - ) - -CITE_HTML_NAMES = ( - ("Math", "eqn."), - ("Image", "fig."), - ("Table", "tbl.") + ("glsc", "%"), ) +CITE_HTML_NAMES = (("Math", "eqn."), ("Image", "fig."), ("Table", "tbl.")) + RST_KNOWN_ROLES = ( - "py:attr", "py:meth", "py:class", "py:func", "py:mod", - "attr", "meth", "class", "func", "mod", - "download", "doc", "file", "program") + "py:attr", + "py:meth", + "py:class", + "py:func", + "py:mod", + "attr", + "meth", + "class", + "func", + "mod", + "download", + "doc", + "file", + "program", +) diff --git a/ipypublish/filters_pandoc/format_cite_elements.py b/ipypublish/filters_pandoc/format_cite_elements.py index b6b2f59..50ea8d7 100644 --- a/ipypublish/filters_pandoc/format_cite_elements.py +++ b/ipypublish/filters_pandoc/format_cite_elements.py @@ -8,12 +8,12 @@ import panflute as pf from 
ipypublish.filters_pandoc.definitions import ( - ATTRIBUTE_CITE_CLASS, CONVERTED_CITE_CLASS, - IPUB_META_ROUTE, CITE_HTML_NAMES -) -from ipypublish.filters_pandoc.html_bib import ( - read_bibliography, process_bib_entry + ATTRIBUTE_CITE_CLASS, + CONVERTED_CITE_CLASS, + IPUB_META_ROUTE, + CITE_HTML_NAMES, ) +from ipypublish.filters_pandoc.html_bib import read_bibliography, process_bib_entry def format_cites(cite, doc): @@ -32,48 +32,54 @@ def format_cites(cite, doc): # check is the Cite has a surrounding Span to supply attributed span = None - if (isinstance(cite.parent, pf.Span) - and ATTRIBUTE_CITE_CLASS in cite.parent.classes): + if isinstance(cite.parent, pf.Span) and ATTRIBUTE_CITE_CLASS in cite.parent.classes: span = cite.parent cite_tag = span.attributes.get("latex", cite_tag) cite_role = span.attributes.get("rst", cite_role) html_capitalize = "capital" in span.classes - if (cite_role == "numref" and - (not doc.get_metadata(IPUB_META_ROUTE + ".use_numref", False))): + if cite_role == "numref" and ( + not doc.get_metadata(IPUB_META_ROUTE + ".use_numref", False) + ): cite_role = "ref" if doc.format in ("latex", "tex"): - if cite_tag in ['cite', 'cref', 'Cref'] or len(cite.citations) == 1: + if cite_tag in ["cite", "cref", "Cref"] or len(cite.citations) == 1: # multiple labels are allowed - return pf.RawInline("\\{0}{{{1}}}".format( - cite_tag, - ",".join([c.id for c in cite.citations])), format="tex") + return pf.RawInline( + "\\{0}{{{1}}}".format( + cite_tag, ",".join([c.id for c in cite.citations]) + ), + format="tex", + ) else: - tex = ( - ", ".join(['\\{0}{{{1}}}'.format(cite_tag, c.id) - for c in cite.citations[:-1]]) + - ' and \\{0}{{{1}}}'.format(cite_tag, cite.citations[-1].id)) - return pf.RawInline(tex, format='tex') + tex = ", ".join( + ["\\{0}{{{1}}}".format(cite_tag, c.id) for c in cite.citations[:-1]] + ) + " and \\{0}{{{1}}}".format(cite_tag, cite.citations[-1].id) + return pf.RawInline(tex, format="tex") if doc.format == "rst": if len(cite.citations) == 1: raw = pf.RawInline( - ":{0}:`{1}`".format(cite_role, cite.citations[0].id), - format='rst') + ":{0}:`{1}`".format(cite_role, cite.citations[0].id), format="rst" + ) elif cite_role == "cite": raw = pf.RawInline( - ":{0}:`{1}`".format(cite_role, ",".join( - [c.id for c in cite.citations])), - format='rst') + ":{0}:`{1}`".format( + cite_role, ",".join([c.id for c in cite.citations]) + ), + format="rst", + ) else: raw = pf.RawInline( - ", ".join([':{0}:`{1}`'.format(cite_role, c.id) - for c in cite.citations[:-1]]) + - ' and :{0}:`{1}`'.format(cite_role, cite.citations[-1].id), - format="rst") + ", ".join( + [":{0}:`{1}`".format(cite_role, c.id) for c in cite.citations[:-1]] + ) + + " and :{0}:`{1}`".format(cite_role, cite.citations[-1].id), + format="rst", + ) # in testing, rst cite roles required space either side # to render correctly @@ -96,8 +102,7 @@ def format_cites(cite, doc): unknown = set() for citation in cite.citations: - ref = doc.get_metadata( - "$$references.{}".format(citation.id), False) + ref = doc.get_metadata("$$references.{}".format(citation.id), False) if ref: # ref -> e.g. 
{"type": "Math", "number": 1} prefix = dict(CITE_HTML_NAMES).get(ref["type"], ref["type"]) @@ -114,8 +119,7 @@ def format_cites(cite, doc): ) elif citation.id in doc.bibdatabase: - cites.add(process_bib_entry( - citation.id, doc.bibdatabase, doc.bibnums)) + cites.add(process_bib_entry(citation.id, doc.bibdatabase, doc.bibnums)) # elements.append(pf.RawInline( # process_bib_entry( # citation.id, doc.bibdatabase, doc.bibnums), @@ -128,31 +132,40 @@ def format_cites(cite, doc): # if found_ref: # return elements # else: - # return pf.RawInline( - # '' - # # 'No reference found for: {}'.format( - # '{}'.format( - # ", ".join([c.id for c in cite.citations]))) + # return pf.RawInline( + # '' + # # 'No reference found for: {}'.format( + # '{}'.format( + # ", ".join([c.id for c in cite.citations]))) elements = [] if cites: # TODO sort - elements.append(pf.RawInline( - '[{}]'.format(",".join(c for c in cites)), - format=doc.format)) + elements.append( + pf.RawInline( + "[{}]".format(",".join(c for c in cites)), + format=doc.format, + ) + ) if names: # TODO sort for prefix, labels in names.items(): - elements.append(pf.RawInline( - '{} {}'.format( - prefix, ",".join(l for l in labels)), - format=doc.format)) + elements.append( + pf.RawInline( + "{} {}".format( + prefix, ",".join(l for l in labels) + ), + format=doc.format, + ) + ) if unknown: - elements.append(pf.RawInline( - '' - # 'No reference found for: {}'.format( - '{}'.format( - ", ".join([l for l in unknown])))) + elements.append( + pf.RawInline( + '' + # 'No reference found for: {}'.format( + "{}".format(", ".join([l for l in unknown])) + ) + ) return elements @@ -170,8 +183,9 @@ def format_span_cites(span, doc): if span.attributes["format"] == "latex": cite_tag = span.attributes["tag"] # TODO use cref for rst ref/numref - return pf.RawInline("\\{0}{{{1}}}".format( - cite_tag, span.identifier, format="tex")) + return pf.RawInline( + "\\{0}{{{1}}}".format(cite_tag, span.identifier, format="tex") + ) if doc.format == "rst": cite_role = "cite" @@ -179,18 +193,20 @@ def format_span_cites(span, doc): cite_role = span.attributes["role"] # TODO use ref for latex ref/cref/Cref return [ - pf.RawInline( - ":{0}:`{1}`".format(cite_role, span.identifier), - format='rst') + pf.RawInline(":{0}:`{1}`".format(cite_role, span.identifier), format="rst") ] if doc.format in ("html", "html5"): # text - return ([pf.RawInline( - ''.format(span.identifier), - format="html")] + - list(span.content) + - [pf.RawInline('', format="html")]) + return ( + [ + pf.RawInline( + ''.format(span.identifier), format="html" + ) + ] + + list(span.content) + + [pf.RawInline("", format="html")] + ) def prepare(doc): @@ -220,9 +236,8 @@ def main(doc=None, strip_spans=True): to_run = [format_cites] if strip_spans: to_run.append(strip_cite_spans) - return pf.run_filters(to_run, - prepare, finalize, doc=doc) + return pf.run_filters(to_run, prepare, finalize, doc=doc) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ipypublish/filters_pandoc/format_label_elements.py b/ipypublish/filters_pandoc/format_label_elements.py index de30285..aeab884 100644 --- a/ipypublish/filters_pandoc/format_label_elements.py +++ b/ipypublish/filters_pandoc/format_label_elements.py @@ -27,14 +27,18 @@ import panflute as pf from ipypublish.filters_pandoc.utils import convert_units, convert_attributes -from ipypublish.filters_pandoc.prepare_labels import (LABELLED_IMAGE_CLASS, LABELLED_MATH_CLASS, LABELLED_TABLE_CLASS) +from ipypublish.filters_pandoc.prepare_labels import ( + 
LABELLED_IMAGE_CLASS, + LABELLED_MATH_CLASS, + LABELLED_TABLE_CLASS, +) try: from textwrap import indent except ImportError: # added in python 3.3 def indent(text, prefix): - return ''.join(prefix + line for line in text.splitlines(True)) + return "".join(prefix + line for line in text.splitlines(True)) LATEX_FIG_LABELLED = """\\begin{{figure}}[{options}] @@ -53,7 +57,16 @@ def indent(text, prefix): \\caption{{{caption}}} \\end{{figure}}""" # noqa: E501 -MATH_ENVS = ('equation', 'align', 'alignat', 'eqnarray', 'multline', 'gather', 'flalign', 'dmath') +MATH_ENVS = ( + "equation", + "align", + "alignat", + "eqnarray", + "multline", + "gather", + "flalign", + "dmath", +) def format_math(math, doc): @@ -65,49 +78,58 @@ def format_math(math, doc): if not isinstance(math, pf.Math): return None - if math.format != 'DisplayMath': + if math.format != "DisplayMath": return None # test if the math text is already wrapped in an environment - regex = re.compile(r'\\begin\{{((?:{0})\*?)\}}(.*)\\end\{{((?:{0})\*?)\}}'.format('|'.join(MATH_ENVS)), re.DOTALL) + regex = re.compile( + r"\\begin\{{((?:{0})\*?)\}}(.*)\\end\{{((?:{0})\*?)\}}".format( + "|".join(MATH_ENVS) + ), + re.DOTALL, + ) wrap_match = regex.match(math.text) env = None label = None - if (isinstance(math.parent, pf.Span) and LABELLED_MATH_CLASS in math.parent.classes): + if isinstance(math.parent, pf.Span) and LABELLED_MATH_CLASS in math.parent.classes: span = math.parent - numbered = '*' if 'unnumbered' in span.classes else '' - env = span.attributes.get('env', 'equation') + numbered + numbered = "*" if "unnumbered" in span.classes else "" + env = span.attributes.get("env", "equation") + numbered label = span.identifier - if doc.format in ('tex', 'latex'): + if doc.format in ("tex", "latex"): if wrap_match: # TODO edge case where a label has been specified, but the math is already wrapped tex = math.text else: - tex = '\\begin{{{0}}}{1}\\label{{{2}}}\\end{{{0}}}'.format(env or 'equation', math.text, label or '') - return pf.RawInline(tex, format='tex') + tex = "\\begin{{{0}}}{1}\\label{{{2}}}\\end{{{0}}}".format( + env or "equation", math.text, label or "" + ) + return pf.RawInline(tex, format="tex") - elif doc.format in ('rst'): + elif doc.format in ("rst"): if env: - tex = indent('\\begin{{{0}}}{1}\\end{{{0}}}'.format(env, math.text), ' ') + tex = indent("\\begin{{{0}}}{1}\\end{{{0}}}".format(env, math.text), " ") else: - tex = indent(math.text.strip(), ' ') - rst = '\n\n.. math::\n' + tex = indent(math.text.strip(), " ") + rst = "\n\n.. 
math::\n" if wrap_match or env: - rst += ' :nowrap:\n' + rst += " :nowrap:\n" if label: - rst += ' :label: {}\n'.format(label) - rst += '\n{}\n\n'.format(tex) - return pf.RawInline(rst, format='rst') + rst += " :label: {}\n".format(label) + rst += "\n{}\n\n".format(tex) + return pf.RawInline(rst, format="rst") - elif doc.format in ('html', 'html5'): + elif doc.format in ("html", "html5"): # new_span = pf.Span(anchor_start, math, anchor_end) # TODO add formatting # TODO name by count if label: if not wrap_match: - math.text = '\\begin{{{0}}}{1}\\end{{{0}}}'.format(env or 'equation', math.text) + math.text = "\\begin{{{0}}}{1}\\end{{{0}}}".format( + env or "equation", math.text + ) return _wrap_in_anchor(math, label) else: return None @@ -123,7 +145,10 @@ def format_image(image, doc): return None span = None - if (isinstance(image.parent, pf.Span) and LABELLED_IMAGE_CLASS in image.parent.classes): + if ( + isinstance(image.parent, pf.Span) + and LABELLED_IMAGE_CLASS in image.parent.classes + ): span = image.parent if span is not None: @@ -135,35 +160,41 @@ def format_image(image, doc): attributes = image.attributes # classes = image.classes - if doc.format in ('tex', 'latex'): + if doc.format in ("tex", "latex"): new_doc = Doc(pf.Para(*image.content)) new_doc.api_version = doc.api_version if image.content: - caption = pf.run_pandoc(json.dumps(new_doc.to_json()), args=['-f', 'json', '-t', 'latex']).strip() + caption = pf.run_pandoc( + json.dumps(new_doc.to_json()), args=["-f", "json", "-t", "latex"] + ).strip() else: - caption = '' + caption = "" - options = attributes.get('placement', '') - size = '' # max width set as 0.9\linewidth - if 'width' in attributes: - width = convert_units(attributes['width'], 'fraction') - size = 'width={0}\\linewidth'.format(width) - elif 'height' in attributes: - height = convert_units(attributes['height'], 'fraction') - size = 'height={0}\\paperheight'.format(height) + options = attributes.get("placement", "") + size = "" # max width set as 0.9\linewidth + if "width" in attributes: + width = convert_units(attributes["width"], "fraction") + size = "width={0}\\linewidth".format(width) + elif "height" in attributes: + height = convert_units(attributes["height"], "fraction") + size = "height={0}\\paperheight".format(height) if identifier: - latex = LATEX_FIG_LABELLED.format(label=identifier, - options=options, - path=image.url, - caption=caption, - size=size) + latex = LATEX_FIG_LABELLED.format( + label=identifier, + options=options, + path=image.url, + caption=caption, + size=size, + ) else: - latex = LATEX_FIG_UNLABELLED.format(options=options, path=image.url, caption=caption, size=size) + latex = LATEX_FIG_UNLABELLED.format( + options=options, path=image.url, caption=caption, size=size + ) - return pf.RawInline(latex, format='tex') + return pf.RawInline(latex, format="tex") - elif doc.format in ('rst',): + elif doc.format in ("rst",): if not image.content.list: # If the container is empty, then pandoc will assign an iterative # reference identifier to it (image0, image1). 
@@ -178,7 +209,7 @@ def format_image(image, doc): return image # TODO formatting and span identifier (convert width/height to %) - elif doc.format in ('html', 'html5'): + elif doc.format in ("html", "html5"): if identifier: return _wrap_in_anchor(image, identifier) else: @@ -198,7 +229,10 @@ def format_table(table, doc): return None div = None # type: pf.Div - if (isinstance(table.parent, pf.Div) and LABELLED_TABLE_CLASS in table.parent.classes): + if ( + isinstance(table.parent, pf.Div) + and LABELLED_TABLE_CLASS in table.parent.classes + ): div = table.parent if div is None: @@ -206,53 +240,71 @@ def format_table(table, doc): attributes = convert_attributes(div.attributes) - if 'align' in div.attributes: - align_text = attributes['align'] - align = [{'l': 'AlignLeft', 'r': 'AlignRight', 'c': 'AlignCenter'}.get(a, None) for a in align_text] + if "align" in div.attributes: + align_text = attributes["align"] + align = [ + {"l": "AlignLeft", "r": "AlignRight", "c": "AlignCenter"}.get(a, None) + for a in align_text + ] if None in align: - raise ValueError("table '{0}' alignment must contain only l,r,c:" ' {1}'.format(div.identifier, align_text)) + raise ValueError( + "table '{0}' alignment must contain only l,r,c:" + " {1}".format(div.identifier, align_text) + ) table.alignment = align - attributes['align'] = align + attributes["align"] = align - if 'widths' in div.attributes: - widths = attributes['widths'] + if "widths" in div.attributes: + widths = attributes["widths"] try: widths = [float(w) for w in widths] except Exception: - raise ValueError("table '{0}' widths must be a list of numbers:" ' {1}'.format(div.identifier, widths)) + raise ValueError( + "table '{0}' widths must be a list of numbers:" + " {1}".format(div.identifier, widths) + ) table.width = widths - attributes['widths'] = widths + attributes["widths"] = widths - if doc.format in ('tex', 'latex'): + if doc.format in ("tex", "latex"): # TODO placement - table.caption.append(pf.RawInline('\\label{{{0}}}'.format(div.identifier), format='tex')) + table.caption.append( + pf.RawInline("\\label{{{0}}}".format(div.identifier), format="tex") + ) return table - if doc.format in ('rst',): + if doc.format in ("rst",): # pandoc 2.6 doesn't output table options if attributes: tbl_doc = pf.Doc(table) tbl_doc.api_version = doc.api_version - tbl_str = pf.convert_text(tbl_doc, input_format='panflute', output_format='rst') + tbl_str = pf.convert_text( + tbl_doc, input_format="panflute", output_format="rst" + ) tbl_lines = tbl_str.splitlines() - if tbl_lines[1].strip() == '': - tbl_lines.insert(1, ' :align: center') - if 'widths' in attributes: + if tbl_lines[1].strip() == "": + tbl_lines.insert(1, " :align: center") + if "widths" in attributes: # in rst widths must be integers - widths = ' '.join([str(int(w * 10)) for w in table.width]) - tbl_lines.insert(1, ' :widths: {}'.format(widths)) + widths = " ".join([str(int(w * 10)) for w in table.width]) + tbl_lines.insert(1, " :widths: {}".format(widths)) # TODO rst column alignment, see # https://cloud-sptheme.readthedocs.io/en/latest/lib/cloud_sptheme.ext.table_styling.html return [ - pf.Para(pf.RawInline('.. _`{0}`:'.format(div.identifier), format='rst')), - pf.RawBlock('\n'.join(tbl_lines) + '\n\n', format=doc.format) + pf.Para( + pf.RawInline(".. _`{0}`:".format(div.identifier), format="rst") + ), + pf.RawBlock("\n".join(tbl_lines) + "\n\n", format=doc.format), ] - return [pf.Para(pf.RawInline('.. _`{0}`:'.format(div.identifier), format='rst')), table] + return [ + pf.Para(pf.RawInline(".. 
_`{0}`:".format(div.identifier), format="rst")), + table, + ] - if doc.format in ('html', 'html5'): + if doc.format in ("html", "html5"): return _wrap_in_anchor(table, div.identifier, inline=False) # TODO formatting, name by count @@ -260,10 +312,14 @@ def format_table(table, doc): def strip_labelled_spans(element, doc): # type: (Span, Doc) -> Element - if isinstance(element, pf.Span) and set(element.classes).intersection([LABELLED_IMAGE_CLASS, LABELLED_MATH_CLASS]): + if isinstance(element, pf.Span) and set(element.classes).intersection( + [LABELLED_IMAGE_CLASS, LABELLED_MATH_CLASS] + ): return list(element.content) - if isinstance(element, pf.Div) and set(element.classes).intersection([LABELLED_TABLE_CLASS]): + if isinstance(element, pf.Div) and set(element.classes).intersection( + [LABELLED_TABLE_CLASS] + ): return list(element.content) @@ -277,8 +333,10 @@ def _wrap_in_anchor(element, label, inline=True): raw = pf.RawInline else: raw = pf.RawBlock - anchor_start = raw(''.format(label), format='html') - anchor_end = raw('', format='html') + anchor_start = raw( + ''.format(label), format="html" + ) + anchor_end = raw("", format="html") return [anchor_start, element, anchor_end] @@ -300,5 +358,5 @@ def main(doc=None, strip_spans=True): return pf.run_filters(to_run, prepare, finalize, doc=doc) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ipypublish/filters_pandoc/format_raw_spans.py b/ipypublish/filters_pandoc/format_raw_spans.py index 9719ed8..3b2ea6b 100644 --- a/ipypublish/filters_pandoc/format_raw_spans.py +++ b/ipypublish/filters_pandoc/format_raw_spans.py @@ -6,13 +6,16 @@ """ import itertools + # from textwrap import fill as textwrap from panflute import Element, Doc, Span # noqa: F401 import panflute as pf from ipypublish.filters_pandoc.definitions import ( - CONVERTED_OTHER_CLASS, CONVERTED_DIRECTIVE_CLASS, IPUB_META_ROUTE + CONVERTED_OTHER_CLASS, + CONVERTED_DIRECTIVE_CLASS, + IPUB_META_ROUTE, ) @@ -23,22 +26,24 @@ def process_raw_spans(container, doc): hide_raw = doc.get_metadata(IPUB_META_ROUTE + ".hide_raw", False) - if (CONVERTED_OTHER_CLASS in container.classes - and isinstance(container, pf.Span)): + if CONVERTED_OTHER_CLASS in container.classes and isinstance(container, pf.Span): if doc.format == "rst" and container.attributes["format"] == "latex": if container.attributes["tag"] in ["todo"]: - return pf.Str("\n\n.. {}:: {}\n\n".format( - container.attributes["tag"], - container.attributes["content"])) + return pf.Str( + "\n\n.. {}:: {}\n\n".format( + container.attributes["tag"], container.attributes["content"] + ) + ) if container.attributes["tag"] == "ensuremath": - return pf.RawInline(":math:`{}`".format( - container.attributes["content"]), format='rst') + return pf.RawInline( + ":math:`{}`".format(container.attributes["content"]), format="rst" + ) - return pf.RawInline(container.attributes.get("original"), - format=container.attributes["format"]) + return pf.RawInline( + container.attributes.get("original"), format=container.attributes["format"] + ) - if (CONVERTED_DIRECTIVE_CLASS in container.classes - and isinstance(container, pf.Div)): + if CONVERTED_DIRECTIVE_CLASS in container.classes and isinstance(container, pf.Div): # convert the directive head, which will be e.g. # Para(Str(..) 
Space Str(toctree::) SoftBreak Str(:maxdepth:) Space Str(2) SoftBreak Str(:numbered:)) # noqa # we need to spilt on the soft breaks, @@ -47,16 +52,20 @@ def process_raw_spans(container, doc): if doc.format in ("rst"): # split into lines by soft breaks - header_lines = [list(y) for x, y in itertools.groupby( - container.content[0].content, - lambda z: isinstance(z, pf.SoftBreak)) if not x] + header_lines = [ + list(y) + for x, y in itertools.groupby( + container.content[0].content, lambda z: isinstance(z, pf.SoftBreak) + ) + if not x + ] # wrap each line in a Para and convert block with pandoc head_doc = pf.Doc(*[pf.Para(*l) for l in header_lines]) head_doc.api_version = doc.api_version - head_str = pf.convert_text(head_doc, - input_format="panflute", - output_format=doc.format) + head_str = pf.convert_text( + head_doc, input_format="panflute", output_format=doc.format + ) # remove blank lines and indent head_str = head_str.replace("\n\n", "\n ") + "\n\n" head_block = pf.RawBlock(head_str, format=doc.format) @@ -71,8 +80,7 @@ def process_raw_spans(container, doc): new_elements = [pf.RawInline("%^*", format=doc.format)] for el in block.content: if isinstance(el, pf.SoftBreak): - new_elements.append( - pf.RawInline("?&@", format=doc.format)) + new_elements.append(pf.RawInline("?&@", format=doc.format)) else: new_elements.append(el) block.content = new_elements @@ -81,62 +89,69 @@ def process_raw_spans(container, doc): # convert body content with pandoc body_doc = pf.Doc(*body_blocks) body_doc.api_version = doc.api_version - body_str = pf.convert_text(body_doc, - input_format="panflute", - output_format=doc.format) + body_str = pf.convert_text( + body_doc, input_format="panflute", output_format=doc.format + ) # raise ValueError(body_blocks) - body_str = body_str.replace( - "%^*", " ").replace("?&@", "\n ") + body_str = body_str.replace("%^*", " ").replace("?&@", "\n ") # ensure all lines are indented correctly # (doesn't occur by default?) - body_str = "\n".join( - [" " + l.lstrip() if l.strip() else l - for l in body_str.splitlines()]) + '\n\n' + body_str = ( + "\n".join( + [ + " " + l.lstrip() if l.strip() else l + for l in body_str.splitlines() + ] + ) + + "\n\n" + ) body_block = pf.RawBlock(body_str, format=doc.format) return [head_block, body_block] - elif (doc.format in ("html", "html5") - and container.attributes["format"] == "rst"): + elif ( + doc.format in ("html", "html5") and container.attributes["format"] == "rst" + ): if hide_raw: return [] head_para = pf.Para( - *[pf.RawInline("
" + " " * 4) - if isinstance(c, pf.SoftBreak) - else c - for c in container.content[0].content]) - head_str = pf.convert_text(head_para, - input_format="panflute", - output_format=doc.format) + *[ + pf.RawInline("
" + " " * 4) + if isinstance(c, pf.SoftBreak) + else c + for c in container.content[0].content + ] + ) + head_str = pf.convert_text( + head_para, input_format="panflute", output_format=doc.format + ) if len(container.content) > 1: body_doc = pf.Doc(*container.content[1:]) body_doc.api_version = doc.api_version - body_str = pf.convert_text(body_doc, - input_format="panflute", - output_format=doc.format) - body_str = ('

' - '{0}
').format(body_str) + body_str = pf.convert_text( + body_doc, input_format="panflute", output_format=doc.format + ) + body_str = ( + '

' "{0}
" + ).format(body_str) else: body_str = "" return pf.RawBlock( '
' - '{1}{2}' - '
'.format( - container.attributes.get("directive", ""), - head_str, - body_str + "{1}{2}" + "".format( + container.attributes.get("directive", ""), head_str, body_str ), - format="html" + format="html", ) - elif (doc.format in ("tex", "latex") - and container.attributes["format"] == "rst"): + elif doc.format in ("tex", "latex") and container.attributes["format"] == "rst": if hide_raw: return [] @@ -156,24 +171,24 @@ def process_raw_spans(container, doc): box_close = "\\end{mdframed}" if len(container.content) == 1: - return pf.RawBlock( - box_open + box_close, - format="tex") + return pf.RawBlock(box_open + box_close, format="tex") else: return ( - [pf.RawBlock(box_open, format="tex")] + - list(container.content[1:]) + - [pf.RawBlock(box_close, format="tex")] + [pf.RawBlock(box_open, format="tex")] + + list(container.content[1:]) + + [pf.RawBlock(box_close, format="tex")] ) - return pf.RawBlock(pf.stringify(pf.Doc(*container.content)), - format=container.attributes["format"]) + return pf.RawBlock( + pf.stringify(pf.Doc(*container.content)), + format=container.attributes["format"], + ) - if (CONVERTED_OTHER_CLASS in container.classes - and isinstance(container, pf.Div)): - return pf.RawBlock(pf.stringify( - pf.Doc(*container.content)), - format=container.attributes["format"]) + if CONVERTED_OTHER_CLASS in container.classes and isinstance(container, pf.Div): + return pf.RawBlock( + pf.stringify(pf.Doc(*container.content)), + format=container.attributes["format"], + ) # now unused @@ -215,6 +230,7 @@ def process_raw_spans(container, doc): # return content + def process_code_latex(code, doc): # type: (pf.CodeBlock, Doc) -> Element if doc.format not in ("tex", "latex"): @@ -243,9 +259,10 @@ def main(doc=None): # type: (Doc) -> None """ """ - return pf.run_filters([process_raw_spans, process_code_latex], - prepare, finalize, doc=doc) + return pf.run_filters( + [process_raw_spans, process_code_latex], prepare, finalize, doc=doc + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ipypublish/filters_pandoc/html_bib.py b/ipypublish/filters_pandoc/html_bib.py index 5d9070c..5349f2c 100644 --- a/ipypublish/filters_pandoc/html_bib.py +++ b/ipypublish/filters_pandoc/html_bib.py @@ -13,7 +13,7 @@ def read_bibliography(path, raise_error=True): bibdatabase = {} bibparser = bibtexparser.bparser.BibTexParser() try: - if hasattr(path, 'open'): + if hasattr(path, "open"): with path.open(encoding="utf8") as bibtex_file: bibtex_data = bibtex_file.read() else: @@ -23,27 +23,29 @@ def read_bibliography(path, raise_error=True): bibdatabase = bibparser.parse(bibtex_data).entries_dict except Exception as err: if raise_error: - raise IOError('could not read bibliopath {}: {}'.format(path, err)) + raise IOError("could not read bibliopath {}: {}".format(path, err)) return bibdatabase -def process_bib_entry(cid, bibdatabase, bibnums, - fallback_fmt="[{author_abbrev}, {year}]"): +def process_bib_entry( + cid, bibdatabase, bibnums, fallback_fmt="[{author_abbrev}, {year}]" +): """work out the best way to represent the bib entry """ entry = bibdatabase[cid] if cid not in bibnums: bibnums[cid] = len(bibnums) + 1 - if 'doi' in entry: + if "doi" in entry: return r'{text}'.format( - doi=entry['doi'], text=bibnums[cid]) - elif 'url' in entry: + doi=entry["doi"], text=bibnums[cid] + ) + elif "url" in entry: + return r'{text}'.format(url=entry["url"], text=bibnums[cid]) + elif "link" in entry: return r'{text}'.format( - url=entry['url'], text=bibnums[cid]) - elif 'link' in entry: - return 
r'{text}'.format( - url=entry['link'], text=bibnums[cid]) + url=entry["link"], text=bibnums[cid] + ) else: return bibnums[cid] # add_abbreviated_author(entry) @@ -53,10 +55,10 @@ def process_bib_entry(cid, bibdatabase, bibnums, def add_abbreviated_author(entry): # abbreviate a list of authors - if 'author' in entry: - authors = re.split(", | and ", entry['author']) + if "author" in entry: + authors = re.split(", | and ", entry["author"]) if len(authors) > 1: - author_abbrev = authors[0] + ' et al' + author_abbrev = authors[0] + " et al" else: author_abbrev = authors[0] entry["author_abbrev"] = author_abbrev @@ -64,18 +66,18 @@ def add_abbreviated_author(entry): def split_date(entry): # split up date into year, month, day - if 'date' in entry: - date = entry['date'].split('-') + if "date" in entry: + date = entry["date"].split("-") if len(date) == 3: - entry['year'] = date[0] - entry['month'] = date[1] - entry['day'] = date[2] + entry["year"] = date[0] + entry["month"] = date[1] + entry["day"] = date[2] else: - entry['year'] = date[0] + entry["year"] = date[0] class DefaultFormatter(string.Formatter): - def __init__(self, default=''): + def __init__(self, default=""): self.default = default def get_value(self, key, args, kwds): @@ -95,7 +97,7 @@ def safe_str(obj): return str(obj) except UnicodeEncodeError: # python 2.7 - obj = re.sub(u"\u2013", "-", obj) # en dash + obj = re.sub(u"\u2013", "-", obj) # en dash obj = re.sub(u"\u2014", "--", obj) # em dash - return obj.encode('ascii', 'ignore').decode('ascii') + return obj.encode("ascii", "ignore").decode("ascii") return "" diff --git a/ipypublish/filters_pandoc/main.py b/ipypublish/filters_pandoc/main.py index dc508fc..085228c 100644 --- a/ipypublish/filters_pandoc/main.py +++ b/ipypublish/filters_pandoc/main.py @@ -12,12 +12,15 @@ import panflute as pf from ipypublish.filters_pandoc.definitions import IPUB_META_ROUTE -from ipypublish.filters_pandoc.utils import ( - apply_filter, get_option, create_ipub_meta) +from ipypublish.filters_pandoc.utils import apply_filter, get_option, create_ipub_meta from ipypublish.filters_pandoc import ( - prepare_cites, prepare_labels, prepare_raw, - format_cite_elements, format_label_elements, format_raw_spans, - rmarkdown_to_mpe + prepare_cites, + prepare_labels, + prepare_raw, + format_cite_elements, + format_label_elements, + format_raw_spans, + rmarkdown_to_mpe, ) @@ -28,17 +31,15 @@ def pandoc_filters(): doc = pf.load() # in an rmarkdown file, the metadata will be under a root `jupyter` key - jmeta = doc.get_metadata('jupyter', {}) + jmeta = doc.get_metadata("jupyter", {}) meta = pf.tools.meta2builtin(doc.metadata) - if 'jupyter' in meta and hasattr(meta["jupyter"], 'items'): + if "jupyter" in meta and hasattr(meta["jupyter"], "items"): jmeta = meta.pop("jupyter") meta.update(jmeta) doc.metadata = meta # builtin2meta(meta) - apply_filters = doc.get_metadata(IPUB_META_ROUTE + ".apply_filters", - default=True) - convert_raw = doc.get_metadata(IPUB_META_ROUTE + ".convert_raw", - default=True) + apply_filters = doc.get_metadata(IPUB_META_ROUTE + ".apply_filters", default=True) + convert_raw = doc.get_metadata(IPUB_META_ROUTE + ".convert_raw", default=True) if apply_filters: if convert_raw: @@ -49,7 +50,7 @@ def pandoc_filters(): format_cite_elements.main, format_raw_spans.main, format_label_elements.main, - rmarkdown_to_mpe.main + rmarkdown_to_mpe.main, ] else: filters = [ @@ -57,7 +58,7 @@ def pandoc_filters(): prepare_labels.main, format_cite_elements.main, format_label_elements.main, - 
rmarkdown_to_mpe.main + rmarkdown_to_mpe.main, ] else: filters = [] @@ -69,8 +70,9 @@ def pandoc_filters(): pf.dump(doc) -def jinja_filter(source, to_format, nb_metadata, cell_metadata, - from_format="markdown", strip=True): +def jinja_filter( + source, to_format, nb_metadata, cell_metadata, from_format="markdown", strip=True +): """run a set of ipypublish pandoc filters as a Jinja2 filter We convert the source to an intermediary pandoc-json AST format, @@ -140,25 +142,27 @@ def jinja_filter(source, to_format, nb_metadata, cell_metadata, # find the preferential versions of the metadata values # TODO a make this autopopulate (possibly from schema) option_preference = [doc.metadata, cell_metadata, nb_metadata] - apply_filters = get_option(option_preference, - keypath=IPUB_META_ROUTE + ".apply_filters", - default=True) - convert_raw = get_option(option_preference, - keypath=IPUB_META_ROUTE + ".convert_raw", - default=True) - hide_raw = get_option(option_preference, - keypath=IPUB_META_ROUTE + ".hide_raw", - default=False) - numref = get_option(option_preference, - keypath=IPUB_META_ROUTE + ".use_numref", default=True) - at_notation = get_option(option_preference, - keypath=IPUB_META_ROUTE + ".at_notation", - default=True) - reftag = get_option(option_preference, - keypath=IPUB_META_ROUTE + ".reftag", default="cite") - strip_meta = get_option(option_preference, - keypath=IPUB_META_ROUTE + ".strip_meta", - default=True) + apply_filters = get_option( + option_preference, keypath=IPUB_META_ROUTE + ".apply_filters", default=True + ) + convert_raw = get_option( + option_preference, keypath=IPUB_META_ROUTE + ".convert_raw", default=True + ) + hide_raw = get_option( + option_preference, keypath=IPUB_META_ROUTE + ".hide_raw", default=False + ) + numref = get_option( + option_preference, keypath=IPUB_META_ROUTE + ".use_numref", default=True + ) + at_notation = get_option( + option_preference, keypath=IPUB_META_ROUTE + ".at_notation", default=True + ) + reftag = get_option( + option_preference, keypath=IPUB_META_ROUTE + ".reftag", default="cite" + ) + strip_meta = get_option( + option_preference, keypath=IPUB_META_ROUTE + ".strip_meta", default=True + ) if apply_filters: # TODO store the original metadata and replace it at end? 
@@ -166,12 +170,16 @@ def jinja_filter(source, to_format, nb_metadata, cell_metadata, # set metadata with preferential values meta = pf.tools.meta2builtin(doc.metadata) - meta.update(create_ipub_meta({ - "use_numref": numref, - "at_notation": at_notation, - "reftag": reftag, - "hide_raw": hide_raw - })) + meta.update( + create_ipub_meta( + { + "use_numref": numref, + "at_notation": at_notation, + "reftag": reftag, + "hide_raw": hide_raw, + } + ) + ) doc.metadata = meta # builtin2meta(meta) # doc.metadata["ipub"]["use_numref"] = builtin2meta(numref) @@ -186,24 +194,25 @@ def jinja_filter(source, to_format, nb_metadata, cell_metadata, prepare_labels.main, format_cite_elements.main, format_raw_spans.main, - format_label_elements.main + format_label_elements.main, ] else: filters = [ prepare_cites.main, prepare_labels.main, format_cite_elements.main, - format_label_elements.main + format_label_elements.main, ] else: filters = [] - out_str = apply_filter(doc, - filters, - in_format=from_format, - out_format=to_format, - strip_meta=bool(strip_meta) - ) + out_str = apply_filter( + doc, + filters, + in_format=from_format, + out_format=to_format, + strip_meta=bool(strip_meta), + ) if strip: out_str = out_str.strip() diff --git a/ipypublish/filters_pandoc/prepare_cites.py b/ipypublish/filters_pandoc/prepare_cites.py index 54734d4..281d011 100644 --- a/ipypublish/filters_pandoc/prepare_cites.py +++ b/ipypublish/filters_pandoc/prepare_cites.py @@ -36,10 +36,12 @@ import panflute as pf -from ipypublish.filters_pandoc.utils import ( - find_attributes, get_pf_content_attr) +from ipypublish.filters_pandoc.utils import find_attributes, get_pf_content_attr from ipypublish.filters_pandoc.definitions import ( - ATTRIBUTE_CITE_CLASS, IPUB_META_ROUTE, PREFIX_MAP) + ATTRIBUTE_CITE_CLASS, + IPUB_META_ROUTE, + PREFIX_MAP, +) def process_citations(element, doc): @@ -72,8 +74,11 @@ def process_citations(element, doc): append = None # check if the cite has a valid prefix, if so extract it - if (isinstance(subel.prev, pf.Str) and subel.prev.text - and (subel.prev.text[-1] in dict(PREFIX_MAP))): + if ( + isinstance(subel.prev, pf.Str) + and subel.prev.text + and (subel.prev.text[-1] in dict(PREFIX_MAP)) + ): prefix = subel.prev.text[-1] mapping = dict(dict(PREFIX_MAP)[prefix]) @@ -95,9 +100,9 @@ def process_citations(element, doc): if classes or attributes: classes.append(ATTRIBUTE_CITE_CLASS) - final_content.append(pf.Span(subel, - classes=sorted(set(classes)), - attributes=attributes)) + final_content.append( + pf.Span(subel, classes=sorted(set(classes)), attributes=attributes) + ) else: final_content.append(subel) @@ -123,9 +128,8 @@ def main(doc=None, extract_formats=True): """if extract_formats then convert citations defined in latex, rst or html formats to special Span elements """ - return pf.run_filter(process_citations, - prepare, finalize, doc=doc) + return pf.run_filter(process_citations, prepare, finalize, doc=doc) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ipypublish/filters_pandoc/prepare_labels.py b/ipypublish/filters_pandoc/prepare_labels.py index b4d206f..a6d71cd 100644 --- a/ipypublish/filters_pandoc/prepare_labels.py +++ b/ipypublish/filters_pandoc/prepare_labels.py @@ -35,7 +35,10 @@ import panflute as pf from ipypublish.filters_pandoc.utils import ( - compare_version, get_panflute_containers, find_attributes) + compare_version, + get_panflute_containers, + find_attributes, +) LABELLED_IMAGE_CLASS = "labelled-Image" LABELLED_MATH_CLASS = "labelled-Math" @@ -58,7 
+61,8 @@ def resolve_tables(element, doc): # attributes = _find_attribute(element.caption[0], # allow_any=True, delete_preceding=False) attributes = find_attributes( - element.caption[-1], search_left=True, include_element=True) + element.caption[-1], search_left=True, include_element=True + ) if not attributes: return None @@ -66,21 +70,19 @@ def resolve_tables(element, doc): # update count doc.refcount[ref_type] += 1 # add to metadata - doc.metadata[ - "$$references"][attributes["id"]] = pf.MetaMap(**{ - "type": pf.MetaString(ref_type), - "number": doc.refcount[ref_type] - }) + doc.metadata["$$references"][attributes["id"]] = pf.MetaMap( + **{"type": pf.MetaString(ref_type), "number": doc.refcount[ref_type]} + ) # remove attribute from caption - element.caption = [el for el in element.caption - if el not in attributes["elements"]] + element.caption = [el for el in element.caption if el not in attributes["elements"]] # wrap in a div - return pf.Div(element, - classes=[ - "labelled-{}".format(ref_type)] + attributes["classes"], - attributes=attributes["attributes"], - identifier=attributes["id"]) + return pf.Div( + element, + classes=["labelled-{}".format(ref_type)] + attributes["classes"], + attributes=attributes["attributes"], + identifier=attributes["id"], + ) def resolve_equations_images(element, doc): @@ -111,7 +113,7 @@ def resolve_equations_images(element, doc): subel = subel.next continue - if isinstance(subel, pf.Image) and compare_version('1.16', '>='): + if isinstance(subel, pf.Image) and compare_version("1.16", ">="): # pandoc >= 1.16 already supports this # TODO for pandoc < 1.16 also look for attributes attached, # to the image path, as occurs with image references @@ -120,7 +122,7 @@ def resolve_equations_images(element, doc): "id": subel.identifier, # "classes": subel.classes, # "attributes": subel.attributes, - "elements": [] + "elements": [], } else: @@ -134,26 +136,26 @@ def resolve_equations_images(element, doc): # update count doc.refcount[ref_type] += 1 # add to metadata - doc.metadata[ - "$$references"][attributes["id"]] = pf.MetaMap(**{ - "type": pf.MetaString(ref_type), - "number": doc.refcount[ref_type] - }) + doc.metadata["$$references"][attributes["id"]] = pf.MetaMap( + **{"type": pf.MetaString(ref_type), "number": doc.refcount[ref_type]} + ) to_delete.update(attributes["elements"]) subel = subel.next new_content = [ - pf.Span(el, - classes=[ - "labelled-{}".format(ref_type)] + to_wrap[el]["classes"], - attributes=to_wrap[el]["attributes"], - identifier=to_wrap[el]["id"] - ) - if el in to_wrap else el + pf.Span( + el, + classes=["labelled-{}".format(ref_type)] + to_wrap[el]["classes"], + attributes=to_wrap[el]["attributes"], + identifier=to_wrap[el]["id"], + ) + if el in to_wrap + else el for el in element.content - if el not in to_delete] + if el not in to_delete + ] # if isinstance(element, pf.Plain): # return pf.Plain(*new_content) @@ -165,11 +167,7 @@ def resolve_equations_images(element, doc): def prepare(doc): # type: (Doc) -> None - doc.refcount = { - "Table": 0, - "Image": 0, - "Math": 0 - } + doc.refcount = {"Table": 0, "Image": 0, "Math": 0} doc.metadata["$$references"] = pf.MetaMap() @@ -180,9 +178,10 @@ def finalize(doc): def main(doc=None): # type: (Doc) -> None - return pf.run_filters([resolve_tables, resolve_equations_images], - prepare, finalize, doc=doc) + return pf.run_filters( + [resolve_tables, resolve_equations_images], prepare, finalize, doc=doc + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/ipypublish/filters_pandoc/prepare_raw.py b/ipypublish/filters_pandoc/prepare_raw.py index 742191e..b0a9980 100644 --- a/ipypublish/filters_pandoc/prepare_raw.py +++ b/ipypublish/filters_pandoc/prepare_raw.py @@ -7,20 +7,23 @@ import panflute as pf from ipypublish.filters_pandoc.definitions import ( - ATTRIBUTE_CITE_CLASS, PREFIX_MAP, PREFIX_MAP_LATEX_R, PREFIX_MAP_RST_R, - RST_KNOWN_ROLES, RAWSPAN_CLASS, RAWDIV_CLASS, - CONVERTED_CITE_CLASS, CONVERTED_OTHER_CLASS, CONVERTED_DIRECTIVE_CLASS + ATTRIBUTE_CITE_CLASS, + PREFIX_MAP, + PREFIX_MAP_LATEX_R, + PREFIX_MAP_RST_R, + RST_KNOWN_ROLES, + RAWSPAN_CLASS, + RAWDIV_CLASS, + CONVERTED_CITE_CLASS, + CONVERTED_OTHER_CLASS, + CONVERTED_DIRECTIVE_CLASS, ) -from ipypublish.filters_pandoc.utils import ( - get_panflute_containers, get_pf_content_attr) +from ipypublish.filters_pandoc.utils import get_panflute_containers, get_pf_content_attr -def create_cite_span(identifiers, rawformat, is_block, - prefix="", alt=None): +def create_cite_span(identifiers, rawformat, is_block, prefix="", alt=None): """create a cite element from an identifier """ - citations = [pf.Citation( - identifier - ) for identifier in identifiers] + citations = [pf.Citation(identifier) for identifier in identifiers] pmapping = dict(dict(PREFIX_MAP)[prefix]) classes = list(pmapping["classes"]) classes += [RAWSPAN_CLASS, CONVERTED_CITE_CLASS, ATTRIBUTE_CITE_CLASS] @@ -29,11 +32,7 @@ def create_cite_span(identifiers, rawformat, is_block, if alt is not None: attributes["alt"] = str(alt) cite = Cite(citations=citations) - span = pf.Span( - cite, - classes=classes, - attributes=attributes - ) + span = pf.Span(cite, classes=classes, attributes=attributes) if is_block: return pf.Plain(span) else: @@ -45,14 +44,17 @@ def process_internal_links(link, doc): """extract links that point to internal items, e.g. 
[text](#label)""" if not isinstance(link, pf.Link): return None - match = re.match(r'#(.+)$', link.url) + match = re.match(r"#(.+)$", link.url) if not match: return None return create_cite_span( - [match.group(1)], "markdown", False, + [match.group(1)], + "markdown", + False, prefix=dict(PREFIX_MAP_LATEX_R).get("cref"), - alt=pf.stringify(pf.Plain(*list(link.content))).strip()) + alt=pf.stringify(pf.Plain(*list(link.content))).strip(), + ) def process_html_cites(container, doc): @@ -81,13 +83,14 @@ def process_html_cites(container, doc): skip = skip - 1 continue - if not (isinstance(element, (pf.RawInline, pf.RawBlock)) and - element.format in ("html", "html4", "html5")): + if not ( + isinstance(element, (pf.RawInline, pf.RawBlock)) + and element.format in ("html", "html4", "html5") + ): new_content.append(element) continue - match = re.match( - r"\"]*)\"?>", element.text) + match = re.match(r"\"]*)\"?>", element.text) if not match: new_content.append(element) continue @@ -97,8 +100,10 @@ def process_html_cites(container, doc): closing = element.next while closing: - if (isinstance(closing, pf.RawInline) and - closing.format in ("html", "html5")): + if isinstance(closing, pf.RawInline) and closing.format in ( + "html", + "html5", + ): endmatch = re.match(r"^\s*\s*$", closing.text) if endmatch: break @@ -110,8 +115,9 @@ def process_html_cites(container, doc): continue # TODO include original content - new_content.append(create_cite_span([match.group(1)], "html", - isinstance(element, pf.RawBlock))) + new_content.append( + create_cite_span([match.group(1)], "html", isinstance(element, pf.RawBlock)) + ) skip = len(span_content) + 1 setattr(container, content_attr, new_content) @@ -132,8 +138,10 @@ def process_latex_raw(element, doc): - everything else will also have class CONVERTED_OTHER_CLASS """ - if not (isinstance(element, (pf.RawInline, pf.RawBlock)) and - element.format in ("tex", "latex")): + if not ( + isinstance(element, (pf.RawInline, pf.RawBlock)) + and element.format in ("tex", "latex") + ): return None return assess_latex(element.text, isinstance(element, pf.RawBlock)) @@ -164,8 +172,8 @@ def process_latex_str(block, doc): new_content.append(element) continue for string in re.split( - r"(\\[^\{\[]+\{[^\}]+\}|\\[^\{\[]+\[[^\]]*\]\{[^\}]+\})", - element.text): + r"(\\[^\{\[]+\{[^\}]+\}|\\[^\{\[]+\[[^\]]*\]\{[^\}]+\})", element.text + ): if not string: continue new_element = assess_latex(string, False) @@ -196,21 +204,27 @@ def assess_latex(text, is_block): # with https://pypi.org/project/regex/ # find tags with no option, i.e \tag{label} - match_latex_noopts = re.match( - r"^\s*\\([^\{\[]+)\{([^\}]+)\}\s*$", text) + match_latex_noopts = re.match(r"^\s*\\([^\{\[]+)\{([^\}]+)\}\s*$", text) if match_latex_noopts: tag = match_latex_noopts.group(1) content = match_latex_noopts.group(2) if tag in dict(PREFIX_MAP_LATEX_R): new_element = create_cite_span( - content.split(","), "latex", is_block, - prefix=dict(PREFIX_MAP_LATEX_R).get(tag, "")) + content.split(","), + "latex", + is_block, + prefix=dict(PREFIX_MAP_LATEX_R).get(tag, ""), + ) return new_element span = pf.Span( classes=[RAWSPAN_CLASS, CONVERTED_OTHER_CLASS], - attributes={"format": "latex", "tag": tag, - "content": content, "original": text} + attributes={ + "format": "latex", + "tag": tag, + "content": content, + "original": text, + }, ) if is_block: return pf.Plain(span) @@ -218,8 +232,7 @@ def assess_latex(text, is_block): return span # find tags with option, i.e \tag[options]{label} - match_latex_wopts = re.match( - 
r"^\s*\\([^\{\[]+)\[([^\]]*)\]\{([^\}]+)\}\s*$", text) + match_latex_wopts = re.match(r"^\s*\\([^\{\[]+)\[([^\]]*)\]\{([^\}]+)\}\s*$", text) if match_latex_wopts: tag = match_latex_wopts.group(1) options = match_latex_wopts.group(2) @@ -227,11 +240,13 @@ def assess_latex(text, is_block): span = pf.Span( classes=[RAWSPAN_CLASS, CONVERTED_OTHER_CLASS], - attributes={"format": "latex", - "tag": tag, - "content": content, - "options": options, - "original": text} + attributes={ + "format": "latex", + "tag": tag, + "content": content, + "options": options, + "original": text, + }, ) if is_block: return pf.Plain(span) @@ -277,14 +292,15 @@ def process_rst_roles(block, doc): skip_next = False continue - if not (isinstance(element, pf.Str) - and isinstance(element.next, pf.Code)): + if not (isinstance(element, pf.Str) and isinstance(element.next, pf.Code)): new_content.append(element) continue - if not (len(element.text) > 2 and - element.text.startswith(":") and - element.text.endswith(":")): + if not ( + len(element.text) > 2 + and element.text.startswith(":") + and element.text.endswith(":") + ): new_content.append(element) continue @@ -293,18 +309,23 @@ def process_rst_roles(block, doc): if role in dict(PREFIX_MAP_RST_R): new_element = create_cite_span( - content.split(","), "rst", False, - prefix=dict(PREFIX_MAP_RST_R).get(role, "")) + content.split(","), + "rst", + False, + prefix=dict(PREFIX_MAP_RST_R).get(role, ""), + ) new_content.append(new_element) skip_next = True elif role in RST_KNOWN_ROLES: new_element = pf.Span( classes=[RAWSPAN_CLASS, CONVERTED_OTHER_CLASS], - attributes={"format": "rst", "role": role, - "content": content, - "original": "{0}`{1}`".format( - element.text, element.next.text) - }) + attributes={ + "format": "rst", + "role": role, + "content": content, + "original": "{0}`{1}`".format(element.text, element.next.text), + }, + ) new_content.append(new_element) skip_next = True else: @@ -334,8 +355,9 @@ def gather_processors(element, doc): # apply processors that change multiple inline elements in a block - if (isinstance(element, get_panflute_containers(pf.Inline)) - or isinstance(pf.Table, pf.DefinitionItem)): + if isinstance(element, get_panflute_containers(pf.Inline)) or isinstance( + pf.Table, pf.DefinitionItem + ): new_element = process_html_cites(element, doc) if new_element is not None: @@ -380,31 +402,36 @@ def wrap_rst_directives(doc): final_blocks.append(block) continue - if (isinstance(block.content[0], pf.Str) + if ( + isinstance(block.content[0], pf.Str) and block.content[0].text == ".." - and isinstance(block.content[1], pf.Space) - and isinstance(block.content[2], pf.Str)): + and isinstance(block.content[1], pf.Space) + and isinstance(block.content[2], pf.Str) + ): - if (len(block.content) == 3 + if ( + len(block.content) == 3 and block.content[2].text.startswith("_") - and block.content[2].text.endswith(":")): + and block.content[2].text.endswith(":") + ): # the block is an rst label new_block = pf.Div( block, classes=[RAWDIV_CLASS, CONVERTED_OTHER_CLASS], - attributes={"format": "rst"} + attributes={"format": "rst"}, ) final_blocks.append(new_block) continue - if (block.content[2].text.endswith("::") - and isinstance(block.next, pf.CodeBlock)): + if block.content[2].text.endswith("::") and isinstance( + block.next, pf.CodeBlock + ): # the block is a directive with body content # TODO at present we allow any directive name # the block may contain option directives, e.g. 
:width: skip_next = True - inline_arg = '' + inline_arg = "" if len(block.content) > 3: inline_content = [] for el in block.content[3:]: @@ -412,27 +439,32 @@ def wrap_rst_directives(doc): break inline_content.append(el) if inline_content: - inline_arg = pf.stringify( - pf.Para(*inline_content)).replace("\n", "").strip() + inline_arg = ( + pf.stringify(pf.Para(*inline_content)) + .replace("\n", "") + .strip() + ) new_block = pf.Div( block, *pf.convert_text(block.next.text), classes=[RAWDIV_CLASS, CONVERTED_DIRECTIVE_CLASS], - attributes={"format": "rst", - "directive": block.content[2].text[:-2], - "inline": inline_arg, - "has_body": True} + attributes={ + "format": "rst", + "directive": block.content[2].text[:-2], + "inline": inline_arg, + "has_body": True, + } ) final_blocks.append(new_block) continue - if (block.content[2].text.endswith("::")): + if block.content[2].text.endswith("::"): # the block is a directive without body content # TODO at present we allow any directive name # the block may contain option directives, e.g. :width: - inline_arg = '' + inline_arg = "" if len(block.content) > 3: inline_content = [] for el in block.content[3:]: @@ -440,16 +472,21 @@ def wrap_rst_directives(doc): break inline_content.append(el) if inline_content: - inline_arg = pf.stringify( - pf.Para(*inline_content)).replace("\n", "").strip() + inline_arg = ( + pf.stringify(pf.Para(*inline_content)) + .replace("\n", "") + .strip() + ) new_block = pf.Div( block, classes=[RAWDIV_CLASS, CONVERTED_DIRECTIVE_CLASS], - attributes={"format": "rst", - "directive": block.content[2].text[:-2], - "inline": inline_arg, - "has_body": False} + attributes={ + "format": "rst", + "directive": block.content[2].text[:-2], + "inline": inline_arg, + "has_body": False, + }, ) final_blocks.append(new_block) continue @@ -474,9 +511,8 @@ def main(doc=None, extract_formats=True): """if extract_formats then convert citations defined in latex, rst or html formats to special Span elements """ - return pf.run_filter(gather_processors, - prepare, finalize, doc=doc) + return pf.run_filter(gather_processors, prepare, finalize, doc=doc) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ipypublish/filters_pandoc/rmarkdown_to_mpe.py b/ipypublish/filters_pandoc/rmarkdown_to_mpe.py index c4499e2..65c6602 100644 --- a/ipypublish/filters_pandoc/rmarkdown_to_mpe.py +++ b/ipypublish/filters_pandoc/rmarkdown_to_mpe.py @@ -28,13 +28,12 @@ def format_code_html(code, doc): # type: (CodeBlock, Doc) -> None - if not (isinstance(code, CodeBlock) - and doc.format in ("html", "html5")): + if not (isinstance(code, CodeBlock) and doc.format in ("html", "html5")): return None - if 'python' in code.attributes.get('data-info', ''): + if "python" in code.attributes.get("data-info", ""): - attr = code.attributes.get('data-info', '') + attr = code.attributes.get("data-info", "") parsed = "cmd='{}'".format(sys.executable) normed = '{{"cmd":"{}"'.format(sys.executable) @@ -58,11 +57,13 @@ def format_code_html(code, doc): normed = normed + "}" - code.attributes['data-info'] = 'python {{{0}}}'.format(parsed) - code.attributes['data-parsed-info'] = ( - '{{"language":"python","attributes":{0}}}'.format(normed)) - code.attributes['data-normalized-info'] = ( - '{{"language":"python","attributes":{0}}}'.format(normed)) + code.attributes["data-info"] = "python {{{0}}}".format(parsed) + code.attributes[ + "data-parsed-info" + ] = '{{"language":"python","attributes":{0}}}'.format(normed) + code.attributes[ + "data-normalized-info" + ] = 
'{{"language":"python","attributes":{0}}}'.format(normed) doc.last_id = this_id @@ -81,9 +82,8 @@ def finalize(doc): def main(doc=None): # type: (Doc) -> None - return pf.run_filter(format_code_html, - prepare, finalize, doc=doc) + return pf.run_filter(format_code_html, prepare, finalize, doc=doc) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ipypublish/filters_pandoc/tests/test_convert_raw.py b/ipypublish/filters_pandoc/tests/test_convert_raw.py index bc826fb..41c8977 100644 --- a/ipypublish/filters_pandoc/tests/test_convert_raw.py +++ b/ipypublish/filters_pandoc/tests/test_convert_raw.py @@ -1,12 +1,13 @@ from ipypublish.filters_pandoc.utils import apply_filter -from ipypublish.filters_pandoc import (prepare_raw, - format_cite_elements, - format_raw_spans) +from ipypublish.filters_pandoc import ( + prepare_raw, + format_cite_elements, + format_raw_spans, +) def test_mkdown_hlink_to_rst(): - out_string = apply_filter( - "[a link](https://pandoc.org/filters.html)", [], "rst") + out_string = apply_filter("[a link](https://pandoc.org/filters.html)", [], "rst") assert out_string == "`a link `__" @@ -20,29 +21,32 @@ def test_latex_to_rst(): "", r"\ref{label3} \todo{something todo}", "", - r"\todo{something else todo}" + r"\todo{something else todo}", ] - out_string = apply_filter(in_string, - [prepare_raw.main, - format_cite_elements.main, - format_raw_spans.main], "rst") - - assert out_string == "\n".join([ - ":ref:`label1` :ref:`label2` :cite:`a-cite-key_2019`", - "", - ":cite:`label1,label2`", - "", - ":ref:`label3`", - "", - ".. todo:: something todo", - "", - "", - "", - ".. todo:: something else todo", - "", - "" - ]) + out_string = apply_filter( + in_string, + [prepare_raw.main, format_cite_elements.main, format_raw_spans.main], + "rst", + ) + + assert out_string == "\n".join( + [ + ":ref:`label1` :ref:`label2` :cite:`a-cite-key_2019`", + "", + ":cite:`label1,label2`", + "", + ":ref:`label3`", + "", + ".. todo:: something todo", + "", + "", + "", + ".. todo:: something else todo", + "", + "", + ] + ) def test_latex_to_rst_with_numref(): @@ -58,81 +62,68 @@ def test_latex_to_rst_with_numref(): "", r"\ref{label3} \todo[inline]{something todo}", "", - r"\todo{something else todo}" + r"\todo{something else todo}", ] - out_string = apply_filter(in_string, - [prepare_raw.main, - format_cite_elements.main, - format_raw_spans.main], "rst") - - assert out_string.strip() == "\n".join([ - ":numref:`label1` :numref:`label2` :cite:`a-cite-key_2019`", - "", - ":ref:`label3`", - "", - ".. todo:: something todo" - "", - "", - "", - "", - ".. todo:: something else todo" - ]) + out_string = apply_filter( + in_string, + [prepare_raw.main, format_cite_elements.main, format_raw_spans.main], + "rst", + ) + + assert out_string.strip() == "\n".join( + [ + ":numref:`label1` :numref:`label2` :cite:`a-cite-key_2019`", + "", + ":ref:`label3`", + "", + ".. todo:: something todo" "", + "", + "", + "", + ".. 
todo:: something else todo", + ] + ) def test_html_to_latex_label(): - in_string = [ - "[some text](#alabel)" - ] + in_string = ["[some text](#alabel)"] out_string = apply_filter( - in_string, - [prepare_raw.main, format_cite_elements.main], "latex") + in_string, [prepare_raw.main, format_cite_elements.main], "latex" + ) - assert out_string == "\n".join([ - r"\cref{alabel}" - ]) + assert out_string == "\n".join([r"\cref{alabel}"]) def test_cite_in_table_caption(): - in_string = [ - 'a b', - '- -', - '1 2', - '', - 'Table: Caption \\cite{a}' - ] + in_string = ["a b", "- -", "1 2", "", "Table: Caption \\cite{a}"] out_string = apply_filter( - in_string, - [prepare_raw.main, format_cite_elements.main], "markdown") + in_string, [prepare_raw.main, format_cite_elements.main], "markdown" + ) - assert out_string == "\n".join([ - ' a b', - ' --- ---', - ' 1 2', - '', - ' : Caption [@a]' - ]) + assert out_string == "\n".join( + [" a b", " --- ---", " 1 2", "", " : Caption [@a]"] + ) def test_html_to_latex_cite(): in_string = [ - 'surrounding text text' - "", - '' + 'surrounding text text' "", + '', ] out_string = apply_filter( - in_string, - [prepare_raw.main, format_cite_elements.main], "latex") + in_string, [prepare_raw.main, format_cite_elements.main], "latex" + ) - assert out_string == "\n".join([ - r"surrounding \cite{cite_key} text \cite{cite_key2}" - ]) + assert out_string == "\n".join( + [r"surrounding \cite{cite_key} text \cite{cite_key2}"] + ) def test_html_to_rst_cite(): @@ -140,217 +131,188 @@ def test_html_to_rst_cite(): in_string = [ 'surrounding text text', "", - '' + '', ] out_string = apply_filter( - in_string, - [prepare_raw.main, format_cite_elements.main], "rst") + in_string, [prepare_raw.main, format_cite_elements.main], "rst" + ) - assert out_string == "\n".join([ - "surrounding :cite:`cite_key` text", - "", - ":cite:`cite_key2`" - ]) + assert out_string == "\n".join( + ["surrounding :cite:`cite_key` text", "", ":cite:`cite_key2`"] + ) def test_citations_latex(): in_string = [ - '@label1', - '', - '[@label1;@label2]', - '', - '[an internal link](#label2)' - '', - '[an external link](http://something.org)', - '', - '![a citation @label](path/to/image.png)', - ] - - out_string = apply_filter( - in_string, - [prepare_raw.main, format_cite_elements.main], "latex") - - assert out_string.strip() == "\n".join([ - "\\cite{label1}", + "@label1", "", - "\\cite{label1,label2}", + "[@label1;@label2]", "", - "\\cref{label2} \\href{http://something.org}{an external link}", + "[an internal link](#label2)" "", + "[an external link](http://something.org)", "", - "\\begin{figure}", - "\\centering", - "\\includegraphics{path/to/image.png}", - "\\caption{a citation \\cite{label}}", - "\\end{figure}" - ]) + "![a citation @label](path/to/image.png)", + ] + + out_string = apply_filter( + in_string, [prepare_raw.main, format_cite_elements.main], "latex" + ) + + assert out_string.strip() == "\n".join( + [ + "\\cite{label1}", + "", + "\\cite{label1,label2}", + "", + "\\cref{label2} \\href{http://something.org}{an external link}", + "", + "\\begin{figure}", + "\\centering", + "\\includegraphics{path/to/image.png}", + "\\caption{a citation \\cite{label}}", + "\\end{figure}", + ] + ) def test_citations_rst(): in_string = [ - '@label1', - '', - '[an internal link](#label2)' - '', - '[an external link](http://something.org)', - '', - '![a citation @label](path/to/image.png)', - + "@label1", + "", + "[an internal link](#label2)" "", + "[an external link](http://something.org)", + "", + "![a citation 
@label](path/to/image.png)", ] out_string = apply_filter( - in_string, - [prepare_raw.main, format_cite_elements.main], "rst") - - assert out_string.strip() == "\n".join([ - ":cite:`label1`", - "", - ":ref:`label2` `an external link `__", - "", - ".. figure:: path/to/image.png", - " :alt: a citation :cite:`label`", - "", - " a citation :cite:`label`" - ]) + in_string, [prepare_raw.main, format_cite_elements.main], "rst" + ) + + assert out_string.strip() == "\n".join( + [ + ":cite:`label1`", + "", + ":ref:`label2` `an external link `__", + "", + ".. figure:: path/to/image.png", + " :alt: a citation :cite:`label`", + "", + " a citation :cite:`label`", + ] + ) def test_rst_cite_to_rst(): - in_string = [ - 'a :ref:`label` b' - ] + in_string = ["a :ref:`label` b"] out_string = apply_filter( - in_string, - [prepare_raw.main, format_cite_elements.main], "rst") + in_string, [prepare_raw.main, format_cite_elements.main], "rst" + ) - assert out_string.strip() == "\n".join([ - 'a :ref:`label` b' - ]) + assert out_string.strip() == "\n".join(["a :ref:`label` b"]) def test_rst_cite_to_latex(): - in_string = [ - 'a :ref:`label` b' - ] + in_string = ["a :ref:`label` b"] out_string = apply_filter( - in_string, - [prepare_raw.main, format_cite_elements.main], "latex") + in_string, [prepare_raw.main, format_cite_elements.main], "latex" + ) - assert out_string.strip() == "\n".join([ - r'a \ref{label} b' - ]) + assert out_string.strip() == "\n".join([r"a \ref{label} b"]) def test_rst_known_role_to_rst(): - in_string = [ - 'a :py:func:`label` b' - ] + in_string = ["a :py:func:`label` b"] out_string = apply_filter( - in_string, - [prepare_raw.main, format_raw_spans.main], "rst") + in_string, [prepare_raw.main, format_raw_spans.main], "rst" + ) - assert out_string.strip() == "\n".join([ - 'a :py:func:`label` b' - ]) + assert out_string.strip() == "\n".join(["a :py:func:`label` b"]) def test_rst_directive_to_rst(): - in_string = [ - '.. versionchanged:: v0.8.3', - '', - ' abc', - '', - ' xyz' - ] + in_string = [".. versionchanged:: v0.8.3", "", " abc", "", " xyz"] out_string = apply_filter( - in_string, - [prepare_raw.main, format_raw_spans.main], "rst") + in_string, [prepare_raw.main, format_raw_spans.main], "rst" + ) - assert out_string.strip() == "\n".join([ - '.. versionchanged:: v0.8.3', - '', - ' abc', - '', - ' xyz' - ]) + assert out_string.strip() == "\n".join( + [".. versionchanged:: v0.8.3", "", " abc", "", " xyz"] + ) def test_rst_directive_to_latex(): - in_string = [ - '.. versionchanged:: v0.8.3', - '', - ' abc', - '', - ' xyz' - ] + in_string = [".. versionchanged:: v0.8.3", "", " abc", "", " xyz"] out_string = apply_filter( - in_string, - [prepare_raw.main, format_raw_spans.main], "latex") - - assert out_string.strip() == "\n".join([ - '\\begin{mdframed}[frametitle={versionchanged},frametitlerule=true]', - '\\mdfsubtitle{v0.8.3}', - '', - 'abc', - '', - 'xyz', - '', - '\\end{mdframed}', - ]) + in_string, [prepare_raw.main, format_raw_spans.main], "latex" + ) + + assert out_string.strip() == "\n".join( + [ + "\\begin{mdframed}[frametitle={versionchanged},frametitlerule=true]", + "\\mdfsubtitle{v0.8.3}", + "", + "abc", + "", + "xyz", + "", + "\\end{mdframed}", + ] + ) def test_rst_directive_with_options_to_rst(): in_string = [ - '.. dir::', - ' :maxdepth: 2', - ' :numbered:', - '', - ' abc', - ' xyz', - '', - ' new paragraph', - '' + ".. 
dir::", + " :maxdepth: 2", + " :numbered:", + "", + " abc", + " xyz", + "", + " new paragraph", + "", ] out_string = apply_filter( - in_string, - [prepare_raw.main, format_raw_spans.main], "rst") - - assert out_string == "\n".join([ - '.. dir::', - ' :maxdepth: 2', - ' :numbered:', - '', - ' abc', - ' xyz', - '', - ' new paragraph', - "", - "" - ]) + in_string, [prepare_raw.main, format_raw_spans.main], "rst" + ) + + assert out_string == "\n".join( + [ + ".. dir::", + " :maxdepth: 2", + " :numbered:", + "", + " abc", + " xyz", + "", + " new paragraph", + "", + "", + ] + ) def test_rst_label_to_rst(): - in_string = [ - '.. _alabel:' - ] + in_string = [".. _alabel:"] out_string = apply_filter( - in_string, - [prepare_raw.main, format_raw_spans.main], "rst") + in_string, [prepare_raw.main, format_raw_spans.main], "rst" + ) - assert out_string.strip() == "\n".join([ - '.. _alabel:' - ]) + assert out_string.strip() == "\n".join([".. _alabel:"]) diff --git a/ipypublish/filters_pandoc/tests/test_format_cite_elements.py b/ipypublish/filters_pandoc/tests/test_format_cite_elements.py index cb12b2f..899ddee 100644 --- a/ipypublish/filters_pandoc/tests/test_format_cite_elements.py +++ b/ipypublish/filters_pandoc/tests/test_format_cite_elements.py @@ -1,71 +1,58 @@ from ipypublish.filters_pandoc.utils import apply_filter -from ipypublish.filters_pandoc import (prepare_cites, - format_cite_elements) +from ipypublish.filters_pandoc import prepare_cites, format_cite_elements def test_multiple_references_rst(): """ """ - in_string = [ - 'multiple references +[@fig:id; @tbl:id; @eq:id1]' - ] + in_string = ["multiple references +[@fig:id; @tbl:id; @eq:id1]"] - out_string = apply_filter(in_string, - [prepare_cites.main, - format_cite_elements.main], "rst") + out_string = apply_filter( + in_string, [prepare_cites.main, format_cite_elements.main], "rst" + ) print(out_string) - assert out_string == "\n".join([ - "multiple references :ref:`fig:id`, :ref:`tbl:id` and :ref:`eq:id1`" - ]) + assert out_string == "\n".join( + ["multiple references :ref:`fig:id`, :ref:`tbl:id` and :ref:`eq:id1`"] + ) def test_multiple_references_latex(): """ """ - in_string = [ - 'multiple references +[@fig:id; @tbl:id; @eq:id1]' - ] + in_string = ["multiple references +[@fig:id; @tbl:id; @eq:id1]"] - out_string = apply_filter(in_string, - [prepare_cites.main, - format_cite_elements.main], "latex") + out_string = apply_filter( + in_string, [prepare_cites.main, format_cite_elements.main], "latex" + ) print(out_string) - assert out_string == "\n".join([ - "multiple references \\cref{fig:id,tbl:id,eq:id1}" - ]) + assert out_string == "\n".join(["multiple references \\cref{fig:id,tbl:id,eq:id1}"]) def test_reference_prefixes_latex(): """ """ - in_string = [ - '(?@key1 &@key2 =@key3)' - ] + in_string = ["(?@key1 &@key2 =@key3)"] - out_string = apply_filter(in_string, - [prepare_cites.main, - format_cite_elements.main], "latex") + out_string = apply_filter( + in_string, [prepare_cites.main, format_cite_elements.main], "latex" + ) print(out_string) - assert out_string == "\n".join([ - "(\\Cref{key1} \\gls{key2} \\eqref{key3})" - ]) + assert out_string == "\n".join(["(\\Cref{key1} \\gls{key2} \\eqref{key3})"]) def test_reference_prefixes_rst(): """ """ - in_string = [ - '(?@key1 &@key2 %@key3 =@key4)' - ] + in_string = ["(?@key1 &@key2 %@key3 =@key4)"] - out_string = apply_filter(in_string, - [prepare_cites.main, - format_cite_elements.main], "rst") + out_string = apply_filter( + in_string, [prepare_cites.main, format_cite_elements.main], 
"rst" + ) print(out_string) - assert out_string == "\n".join([ - "(:ref:`key1` :gls:`key2` :glsc:`key3` :eq:`key4`)" - ]) + assert out_string == "\n".join( + ["(:ref:`key1` :gls:`key2` :glsc:`key3` :eq:`key4`)"] + ) diff --git a/ipypublish/filters_pandoc/tests/test_format_label_elements.py b/ipypublish/filters_pandoc/tests/test_format_label_elements.py index 17594ec..3c98776 100644 --- a/ipypublish/filters_pandoc/tests/test_format_label_elements.py +++ b/ipypublish/filters_pandoc/tests/test_format_label_elements.py @@ -6,193 +6,311 @@ def test_math_span_latex(): - in_json = {"blocks": [{"t": "Para", "c": [ - {"t": "Span", "c": [ - ["a", ["labelled-Math"], [["b", "2"]]], - [{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}]]} - ]}], "pandoc-api-version": [1, 17, 5, 1], + in_json = { + "blocks": [ + { + "t": "Para", + "c": [ + { + "t": "Span", + "c": [ + ["a", ["labelled-Math"], [["b", "2"]]], + [{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}], + ], + } + ], + } + ], + "pandoc-api-version": [1, 17, 5, 1], "meta": { - "$$references": {"t": "MetaMap", "c": { - "a": {"t": "MetaMap", "c": { - "type": {"t": "MetaString", "c": "Math"}, - "number": {"t": "MetaString", "c": "1"}}}}}}} + "$$references": { + "t": "MetaMap", + "c": { + "a": { + "t": "MetaMap", + "c": { + "type": {"t": "MetaString", "c": "Math"}, + "number": {"t": "MetaString", "c": "1"}, + }, + } + }, + } + }, + } out_string = apply_filter( - in_json, format_label_elements.main, "latex", in_format="json") + in_json, format_label_elements.main, "latex", in_format="json" + ) - assert out_string.strip() == "\n".join([ - r"\begin{equation}a=1\label{a}\end{equation}" - ]) + assert out_string.strip() == "\n".join( + [r"\begin{equation}a=1\label{a}\end{equation}"] + ) def test_math_span_rst(): - in_json = {"blocks": [{"t": "Para", "c": [ - {"t": "Span", "c": [ - ["a", ["labelled-Math"], [["b", "2"]]], - [{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}]]} - ]}], "pandoc-api-version": [1, 17, 5, 1], + in_json = { + "blocks": [ + { + "t": "Para", + "c": [ + { + "t": "Span", + "c": [ + ["a", ["labelled-Math"], [["b", "2"]]], + [{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}], + ], + } + ], + } + ], + "pandoc-api-version": [1, 17, 5, 1], "meta": { - "$$references": {"t": "MetaMap", "c": { - "a": {"t": "MetaMap", "c": { - "type": {"t": "MetaString", "c": "Math"}, - "number": {"t": "MetaString", "c": "1"}}}}}}} + "$$references": { + "t": "MetaMap", + "c": { + "a": { + "t": "MetaMap", + "c": { + "type": {"t": "MetaString", "c": "Math"}, + "number": {"t": "MetaString", "c": "1"}, + }, + } + }, + } + }, + } out_string = apply_filter( - in_json, format_label_elements.main, "rst", in_format="json") + in_json, format_label_elements.main, "rst", in_format="json" + ) - assert out_string.strip() == "\n".join([ - ".. math::", - " :nowrap:", - " :label: a", - "", - r" \begin{equation}a=1\end{equation}" - ]) + assert out_string.strip() == "\n".join( + [ + ".. 
math::", + " :nowrap:", + " :label: a", + "", + r" \begin{equation}a=1\end{equation}", + ] + ) @pytest.mark.skip( - reason="there's an issue with pandoc outputting unicode in '/em> = 1'") + reason="there's an issue with pandoc outputting unicode in '/em> = 1'" +) def test_math_span_html(): - in_json = {"blocks": [{"t": "Para", "c": [ - {"t": "Span", "c": [ - ["a", ["labelled-Math"], [["b", "2"]]], - [{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}]]} - ]}], "pandoc-api-version": [1, 17, 5, 1], + in_json = { + "blocks": [ + { + "t": "Para", + "c": [ + { + "t": "Span", + "c": [ + ["a", ["labelled-Math"], [["b", "2"]]], + [{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}], + ], + } + ], + } + ], + "pandoc-api-version": [1, 17, 5, 1], "meta": { - "$$references": {"t": "MetaMap", "c": { - "a": {"t": "MetaMap", "c": { - "type": {"t": "MetaString", "c": "Math"}, - "number": {"t": "MetaString", "c": "1"}}}}}}} + "$$references": { + "t": "MetaMap", + "c": { + "a": { + "t": "MetaMap", + "c": { + "type": {"t": "MetaString", "c": "Math"}, + "number": {"t": "MetaString", "c": "1"}, + }, + } + }, + } + }, + } out_string = apply_filter( - in_json, format_label_elements.main, "html", in_format="json") + in_json, format_label_elements.main, "html", in_format="json" + ) - assert out_string.strip() == "\n".join([ - '

' - '
' - 'a = 1' - '
' - '

' - ]) + assert out_string.strip() == "\n".join( + [ + '

' + "
" + 'a = 1' + "
" + "

" + ] + ) def test_math_md_to_rst(): in_str = [ "$$a = b$$ {#eq:id1}", - "$$c &= d \\\\ other &= e$$ {#a env=align .unnumbered}" + "$$c &= d \\\\ other &= e$$ {#a env=align .unnumbered}", ] out_string = apply_filter( - in_str, [prepare_labels.main, format_label_elements.main], - in_format="markdown", out_format="rst") - - assert out_string.strip() == "\n".join([ - ".. math::", - " :nowrap:", - " :label: eq:id1", - "", - r" \begin{equation}a = b\end{equation}", - "", - "", - "", - ".. math::", - " :nowrap:", - " :label: a", - "", - r" \begin{align*}c &= d \\ other &= e\end{align*}" - ]) + in_str, + [prepare_labels.main, format_label_elements.main], + in_format="markdown", + out_format="rst", + ) + + assert out_string.strip() == "\n".join( + [ + ".. math::", + " :nowrap:", + " :label: eq:id1", + "", + r" \begin{equation}a = b\end{equation}", + "", + "", + "", + ".. math::", + " :nowrap:", + " :label: a", + "", + r" \begin{align*}c &= d \\ other &= e\end{align*}", + ] + ) def test_image_html(): """ """ # "![a title](path/to/image.png){#label1 .class-name a=5}" - in_json = ( - {"blocks": [ - {"t": "Para", "c": [ - {"t": "Image", "c": [ - ["label1", - ["class-name"], - [["a", "5"]]], - [{"t": "Str", "c": "a"}, - {"t": "Space"}, {"t": "Str", "c": "title"}], - ["path/to/image.png", "fig:"]]}]}], - "pandoc-api-version": [1, 17, 5, 1], "meta": {}} - ) + in_json = { + "blocks": [ + { + "t": "Para", + "c": [ + { + "t": "Image", + "c": [ + ["label1", ["class-name"], [["a", "5"]]], + [ + {"t": "Str", "c": "a"}, + {"t": "Space"}, + {"t": "Str", "c": "title"}, + ], + ["path/to/image.png", "fig:"], + ], + } + ], + } + ], + "pandoc-api-version": [1, 17, 5, 1], + "meta": {}, + } out_string = apply_filter( - in_json, format_label_elements.main, "html", in_format="json") + in_json, format_label_elements.main, "html", in_format="json" + ) - assert out_string.strip() == "\n".join([ - '

' - 'a title' - '

' - ]) + assert out_string.strip() == "\n".join( + [ + '

' + 'a title' + "

" + ] + ) def test_image_rst(): """ """ # "![a title](path/to/image.png){#label1 .class-name a=5}" - in_json = ( - {"blocks": [ - {"t": "Para", "c": [ - {"t": "Image", "c": [ - ["label1", - ["class-name"], - [["a", "5"]]], - [{"t": "Str", "c": "a"}, - {"t": "Space"}, {"t": "Str", "c": "title"}], - ["path/to/image.png", "fig:"]]}]}], - "pandoc-api-version": [1, 17, 5, 1], "meta": {}} - ) + in_json = { + "blocks": [ + { + "t": "Para", + "c": [ + { + "t": "Image", + "c": [ + ["label1", ["class-name"], [["a", "5"]]], + [ + {"t": "Str", "c": "a"}, + {"t": "Space"}, + {"t": "Str", "c": "title"}, + ], + ["path/to/image.png", "fig:"], + ], + } + ], + } + ], + "pandoc-api-version": [1, 17, 5, 1], + "meta": {}, + } out_string = apply_filter( - in_json, format_label_elements.main, "rst", in_format="json") + in_json, format_label_elements.main, "rst", in_format="json" + ) - assert out_string.strip() == "\n".join([ - ".. figure:: path/to/image.png", - " :alt: a title", - " :figclass: class-name", - " :name: label1", - "", - " a title" - ]) + assert out_string.strip() == "\n".join( + [ + ".. figure:: path/to/image.png", + " :alt: a title", + " :figclass: class-name", + " :name: label1", + "", + " a title", + ] + ) def test_image_latex(): """ """ # "![a title](path/to/image.png){#label1 .class-name a=5}" - in_json = ( - {"blocks": [ - {"t": "Para", "c": [ - {"t": "Image", "c": [ - ["label1", - ["class-name"], - [["a", "5"]]], - [{"t": "Str", "c": "a"}, - {"t": "Space"}, {"t": "Str", "c": "title"}], - ["path/to/image.png", "fig:"]]}]}], - "pandoc-api-version": [1, 17, 5, 1], "meta": {}} - ) + in_json = { + "blocks": [ + { + "t": "Para", + "c": [ + { + "t": "Image", + "c": [ + ["label1", ["class-name"], [["a", "5"]]], + [ + {"t": "Str", "c": "a"}, + {"t": "Space"}, + {"t": "Str", "c": "title"}, + ], + ["path/to/image.png", "fig:"], + ], + } + ], + } + ], + "pandoc-api-version": [1, 17, 5, 1], + "meta": {}, + } out_string = apply_filter( - in_json, format_label_elements.main, "latex", in_format="json") + in_json, format_label_elements.main, "latex", in_format="json" + ) - assert out_string.strip() == "\n".join([ - r"\begin{figure}[]", - r"\hypertarget{label1}{%", - r"\begin{center}", - r"\adjustimage{max size={0.9\linewidth}{0.9\paperheight},}" - r"{path/to/image.png}", - r"\end{center}", - r"\caption{a title}\label{label1}", - "}", - r"\end{figure}" - ]) + assert out_string.strip() == "\n".join( + [ + r"\begin{figure}[]", + r"\hypertarget{label1}{%", + r"\begin{center}", + r"\adjustimage{max size={0.9\linewidth}{0.9\paperheight},}" + r"{path/to/image.png}", + r"\end{center}", + r"\caption{a title}\label{label1}", + "}", + r"\end{figure}", + ] + ) def test_table_html(): @@ -206,60 +324,93 @@ def test_table_html(): Table: Caption. 
{#tbl:id} """ - in_json = ( - { - "pandoc-api-version": [1, 17, 5, 1], - "meta": { - "$$references": {"t": "MetaMap", "c": { - "tbl:id": {"t": "MetaMap", "c": { - "type": {"t": "MetaString", "c": "Table"}, - "number": {"t": "MetaString", "c": "1"}}}}}}, - "blocks": [{"t": "Para", "c": [ - {"t": "Str", "c": "Some"}, - {"t": "Space"}, - {"t": "Str", "c": "text"}]}, - {"t": "Div", "c": [ + in_json = { + "pandoc-api-version": [1, 17, 5, 1], + "meta": { + "$$references": { + "t": "MetaMap", + "c": { + "tbl:id": { + "t": "MetaMap", + "c": { + "type": {"t": "MetaString", "c": "Table"}, + "number": {"t": "MetaString", "c": "1"}, + }, + } + }, + } + }, + "blocks": [ + { + "t": "Para", + "c": [ + {"t": "Str", "c": "Some"}, + {"t": "Space"}, + {"t": "Str", "c": "text"}, + ], + }, + { + "t": "Div", + "c": [ ["tbl:id", ["labelled-Table"], []], - [{"t": "Table", "c": [ - [{"t": "Str", "c": "Caption."}, - {"t": "Space"}], - [{"t": "AlignDefault"}, - {"t": "AlignDefault"}], - [0, 0], - [[{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}]], - [[[{"t": "Plain", "c": [{"t": "Str", "c": "1"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "2"}]}]], - [[{"t": "Plain", "c": [{"t": "Str", "c": "4"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "5"}]}] - ]]]}]]}]} - ) + [ + { + "t": "Table", + "c": [ + [{"t": "Str", "c": "Caption."}, {"t": "Space"}], + [{"t": "AlignDefault"}, {"t": "AlignDefault"}], + [0, 0], + [ + [{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}], + ], + [ + [ + [{"t": "Plain", "c": [{"t": "Str", "c": "1"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "2"}]}], + ], + [ + [{"t": "Plain", "c": [{"t": "Str", "c": "4"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "5"}]}], + ], + ], + ], + } + ], + ], + }, + ], + } out_string = apply_filter( - in_json, format_label_elements.main, "html", in_format="json") - - assert out_string.strip() == "\n".join([ - '

Some text

', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '', - '
Caption.
ab
12
45
', - '
']) + in_json, format_label_elements.main, "html", in_format="json" + ) + + assert out_string.strip() == "\n".join( + [ + "

Some text

", + '', + "", + "", + "", + '', + "", + "", + "", + "", + "", + '', + "", + "", + "", + '', + "", + "", + "", + "", + "
Caption.
ab
12
45
", + "
", + ] + ) def test_table_rst(): @@ -273,37 +424,65 @@ def test_table_rst(): Table: Caption. {#tbl:id} """ - in_json = ( - { - "pandoc-api-version": [1, 17, 5, 1], - "meta": { - "$$references": {"t": "MetaMap", "c": { - "tbl:id": {"t": "MetaMap", "c": { - "type": {"t": "MetaString", "c": "Table"}, - "number": {"t": "MetaString", "c": "1"}}}}}}, - "blocks": [{"t": "Para", "c": [ - {"t": "Str", "c": "Some"}, - {"t": "Space"}, - {"t": "Str", "c": "text"}]}, - {"t": "Div", "c": [ + in_json = { + "pandoc-api-version": [1, 17, 5, 1], + "meta": { + "$$references": { + "t": "MetaMap", + "c": { + "tbl:id": { + "t": "MetaMap", + "c": { + "type": {"t": "MetaString", "c": "Table"}, + "number": {"t": "MetaString", "c": "1"}, + }, + } + }, + } + }, + "blocks": [ + { + "t": "Para", + "c": [ + {"t": "Str", "c": "Some"}, + {"t": "Space"}, + {"t": "Str", "c": "text"}, + ], + }, + { + "t": "Div", + "c": [ ["tbl:id", ["labelled-Table"], []], - [{"t": "Table", "c": [ - [{"t": "Str", "c": "Caption."}, - {"t": "Space"}], - [{"t": "AlignDefault"}, - {"t": "AlignDefault"}], - [0, 0], - [[{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}]], - [[[{"t": "Plain", "c": [{"t": "Str", "c": "1"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "2"}]}]], - [[{"t": "Plain", "c": [{"t": "Str", "c": "4"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "5"}]}] - ]]]}]]}]} - ) + [ + { + "t": "Table", + "c": [ + [{"t": "Str", "c": "Caption."}, {"t": "Space"}], + [{"t": "AlignDefault"}, {"t": "AlignDefault"}], + [0, 0], + [ + [{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}], + ], + [ + [ + [{"t": "Plain", "c": [{"t": "Str", "c": "1"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "2"}]}], + ], + [ + [{"t": "Plain", "c": [{"t": "Str", "c": "4"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "5"}]}], + ], + ], + ], + } + ], + ], + }, + ], + } out_string = apply_filter( - in_json, format_label_elements.main, "rst", in_format="json") + in_json, format_label_elements.main, "rst", in_format="json" + ) - assert out_string.strip().splitlines()[0:3] == [ - 'Some text', '', '.. _`tbl:id`:' - ] + assert out_string.strip().splitlines()[0:3] == ["Some text", "", ".. 
_`tbl:id`:"] diff --git a/ipypublish/filters_pandoc/tests/test_jinja_filter.py b/ipypublish/filters_pandoc/tests/test_jinja_filter.py index 5dde222..417f096 100644 --- a/ipypublish/filters_pandoc/tests/test_jinja_filter.py +++ b/ipypublish/filters_pandoc/tests/test_jinja_filter.py @@ -4,67 +4,93 @@ def test_basic(): - out_str = jinja_filter('a', 'rst', {}, {}) - assert out_str == 'a' + out_str = jinja_filter("a", "rst", {}, {}) + assert out_str == "a" def test_reference(): - out_str = jinja_filter('@label', 'rst', {}, {}) - assert out_str == ':cite:`label`' + out_str = jinja_filter("@label", "rst", {}, {}) + assert out_str == ":cite:`label`" def test_reference_prefix(): - out_str = jinja_filter('+@label', 'rst', {}, {}) - assert out_str == ':numref:`label`' + out_str = jinja_filter("+@label", "rst", {}, {}) + assert out_str == ":numref:`label`" def test_option_in_nb_meta(): - out_str = jinja_filter('+@label', 'rst', create_ipub_meta({'use_numref': False}), {}) - assert out_str == ':ref:`label`' + out_str = jinja_filter( + "+@label", "rst", create_ipub_meta({"use_numref": False}), {} + ) + assert out_str == ":ref:`label`" def test_option_in_cell_meta(): - out_str = jinja_filter('+@label', 'rst', create_ipub_meta({'use_numref': False}), - create_ipub_meta({'use_numref': True})) - assert out_str == ':numref:`label`' + out_str = jinja_filter( + "+@label", + "rst", + create_ipub_meta({"use_numref": False}), + create_ipub_meta({"use_numref": True}), + ) + assert out_str == ":numref:`label`" def test_option_in_top_matter(): # TODO create ipub yaml from IPUB_META_ROUTE - in_str = '\n'.join(['---', 'ipub:', ' pandoc:', ' use_numref: true', '', '...', '', '+@label']) + in_str = "\n".join( + ["---", "ipub:", " pandoc:", " use_numref: true", "", "...", "", "+@label"] + ) - out_str = jinja_filter(in_str, 'rst', create_ipub_meta({'use_numref': False}), {}) - assert out_str == ':numref:`label`' + out_str = jinja_filter(in_str, "rst", create_ipub_meta({"use_numref": False}), {}) + assert out_str == ":numref:`label`" def test_at_notation_false(): - out_str = jinja_filter('+@label', 'rst', create_ipub_meta({'at_notation': False}), {}) - assert out_str == '+ :cite:`label`' + out_str = jinja_filter( + "+@label", "rst", create_ipub_meta({"at_notation": False}), {} + ) + assert out_str == "+ :cite:`label`" def test_remove_filter(): - out_str = jinja_filter('+@label', 'rst', create_ipub_meta({'apply_filters': False}), {}) - assert out_str == '+@label' + out_str = jinja_filter( + "+@label", "rst", create_ipub_meta({"apply_filters": False}), {} + ) + assert out_str == "+@label" def test_complex_equation(): in_source = [ - '$$\\begin{equation*}\n', 'f(x) = \\left\\{\n', '\\begin{array}{ll}\n', '\\; x \\qquad x \\geq 0 \\\\\n', - '\\; 0 \\qquad else\n', '\\end{array}\n', '\\right.\n', '\\end{equation*}$$' + "$$\\begin{equation*}\n", + "f(x) = \\left\\{\n", + "\\begin{array}{ll}\n", + "\\; x \\qquad x \\geq 0 \\\\\n", + "\\; 0 \\qquad else\n", + "\\end{array}\n", + "\\right.\n", + "\\end{equation*}$$", ] - out_string = jinja_filter(''.join(in_source), 'rst', create_ipub_meta({}), {}) + out_string = jinja_filter("".join(in_source), "rst", create_ipub_meta({}), {}) expected = [ - '.. math::', ' :nowrap:', '', ' \\begin{equation*}', ' f(x) = \\left\\{', ' \\begin{array}{ll}', - ' \\; x \\qquad x \\geq 0 \\\\', ' \\; 0 \\qquad else', ' \\end{array}', ' \\right.', - ' \\end{equation*}' + ".. 
math::", + " :nowrap:", + "", + " \\begin{equation*}", + " f(x) = \\left\\{", + " \\begin{array}{ll}", + " \\; x \\qquad x \\geq 0 \\\\", + " \\; 0 \\qquad else", + " \\end{array}", + " \\right.", + " \\end{equation*}", ] - assert out_string.strip() == '\n'.join(expected) + assert out_string.strip() == "\n".join(expected) diff --git a/ipypublish/filters_pandoc/tests/test_prepare_cites.py b/ipypublish/filters_pandoc/tests/test_prepare_cites.py index 9747f9c..f3555ad 100644 --- a/ipypublish/filters_pandoc/tests/test_prepare_cites.py +++ b/ipypublish/filters_pandoc/tests/test_prepare_cites.py @@ -6,17 +6,25 @@ def test_para_rst(): """ """ - in_string = ['+@label{.class a=1} xyz *@label2* @label3{.b}', '', '(@label4{})', '', '(@label5{.b} x)'] - out_string = apply_filter(in_string, main, 'rst') + in_string = [ + "+@label{.class a=1} xyz *@label2* @label3{.b}", + "", + "(@label4{})", + "", + "(@label5{.b} x)", + ] + out_string = apply_filter(in_string, main, "rst") - assert out_string.strip() == '\n'.join(['@label xyz *@label2* @label3', '', '(@label4)', '', '(@label5 x)']) + assert out_string.strip() == "\n".join( + ["@label xyz *@label2* @label3", "", "(@label4)", "", "(@label5 x)"] + ) def test_para_html(data_regression): """ """ - in_string = ['+@label{ .class a=1} xyz *@label2* @label3{ .b}'] - out_string = apply_filter(in_string, main, 'html') + in_string = ["+@label{ .class a=1} xyz *@label2* @label3{ .b}"] + out_string = apply_filter(in_string, main, "html") parser = HTML2JSONParser() parser.feed(out_string) @@ -40,8 +48,8 @@ def test_para_html(data_regression): def test_table_html(data_regression): """ """ - in_string = ['a b', '- -', 'x y', '', 'Table: Caption +@label'] - out_string = apply_filter(in_string, main, 'html') + in_string = ["a b", "- -", "x y", "", "Table: Caption +@label"] + out_string = apply_filter(in_string, main, "html") parser = HTML2JSONParser() parser.feed(out_string) @@ -71,8 +79,8 @@ def test_table_html(data_regression): def test_image_html(data_regression): - in_string = ['![a title with a @label1 +@label2 {.nclass x=3}](path/to/image.png)'] - out_string = apply_filter(in_string, main, 'html') + in_string = ["![a title with a @label1 +@label2 {.nclass x=3}](path/to/image.png)"] + out_string = apply_filter(in_string, main, "html") parser = HTML2JSONParser() parser.feed(out_string) diff --git a/ipypublish/filters_pandoc/tests/test_prepare_labels.py b/ipypublish/filters_pandoc/tests/test_prepare_labels.py index a64222d..e983a2b 100644 --- a/ipypublish/filters_pandoc/tests/test_prepare_labels.py +++ b/ipypublish/filters_pandoc/tests/test_prepare_labels.py @@ -28,84 +28,95 @@ def test_basic(): "meta": {}, "pandoc-api-version": [1, 17, 5, 1], "blocks": [ - {"t": "Para", - "c": [{"t": "Math", "c": [{"t": "InlineMath"}, "a=1"]}, - {"t": "Space"}, - {"t": "Str", "c": "{#a"}, - {"t": "Space"}, - {"t": "Str", "c": ".a"}, - {"t": "Space"}, - {"t": "Str", "c": "b="}, - {"t": "Math", "c": [ - {"t": "InlineMath"}, "2"]}, {"t": "Str", "c": "}"}]}, - {"t": "Para", "c": [ - {"t": "Image", - "c": [["b", ["x"], [["a", "$1$"], ["b", "2"]]], - [{"t": "Str", "c": "a"}], ["a/b.jpg", "fig:"]] - }]}] + { + "t": "Para", + "c": [ + {"t": "Math", "c": [{"t": "InlineMath"}, "a=1"]}, + {"t": "Space"}, + {"t": "Str", "c": "{#a"}, + {"t": "Space"}, + {"t": "Str", "c": ".a"}, + {"t": "Space"}, + {"t": "Str", "c": "b="}, + {"t": "Math", "c": [{"t": "InlineMath"}, "2"]}, + {"t": "Str", "c": "}"}, + ], + }, + { + "t": "Para", + "c": [ + { + "t": "Image", + "c": [ + ["b", ["x"], [["a", "$1$"], 
["b", "2"]]], + [{"t": "Str", "c": "a"}], + ["a/b.jpg", "fig:"], + ], + } + ], + }, + ], } out_json = apply_to_json(in_json, main) - assert edict.diff(out_json, { - "pandoc-api-version": [1, 17, 5, 1], - "meta": { - "$$references": { - "t": "MetaMap", - "c": { - "a": { + assert ( + edict.diff( + out_json, + { + "pandoc-api-version": [1, 17, 5, 1], + "meta": { + "$$references": { "t": "MetaMap", "c": { - "type": { - "t": "MetaString", - "c": "Math" + "a": { + "t": "MetaMap", + "c": { + "type": {"t": "MetaString", "c": "Math"}, + "number": {"t": "MetaString", "c": "1"}, + }, }, - "number": { - "t": "MetaString", - "c": "1" - } - } - }, - "b": { - "t": "MetaMap", - "c": { - "type": { - "t": "MetaString", - "c": "Image" + "b": { + "t": "MetaMap", + "c": { + "type": {"t": "MetaString", "c": "Image"}, + "number": {"t": "MetaString", "c": "1"}, + }, }, - "number": { - "t": "MetaString", - "c": "1" - } - } + }, } - } - } - }, - "blocks": [ - {"t": "Para", - "c": [ - {"t": "Span", - "c": [ - ["a", ["labelled-Math", "a"], [["b", "2"]]], - [{"t": "Math", - "c": [{"t": "InlineMath"}, "a=1"]}]] - }] - }, - { - "t": "Para", - "c": [ + }, + "blocks": [ { - "t": "Image", - "c": [["b", ["x"], [["a", "$1$"], ["b", "2"]]], - [{"t": "Str", "c": "a"}], - ["a/b.jpg", "fig:"]] - } - ] - } - ] - } - ) == {} + "t": "Para", + "c": [ + { + "t": "Span", + "c": [ + ["a", ["labelled-Math", "a"], [["b", "2"]]], + [{"t": "Math", "c": [{"t": "InlineMath"}, "a=1"]}], + ], + } + ], + }, + { + "t": "Para", + "c": [ + { + "t": "Image", + "c": [ + ["b", ["x"], [["a", "$1$"], ["b", "2"]]], + [{"t": "Str", "c": "a"}], + ["a/b.jpg", "fig:"], + ], + } + ], + }, + ], + }, + ) + == {} + ) def test_multiple_on_line(): @@ -115,90 +126,108 @@ def test_multiple_on_line(): ![a](a/b.jpg) """ in_json = { - "pandoc-api-version": [1, 17, 5, 1], "meta": {}, + "pandoc-api-version": [1, 17, 5, 1], + "meta": {}, "blocks": [ - {"t": "Para", "c": [ - {"t": "Math", "c": [{"t": "InlineMath"}, "a=1"]}, - {"t": "Space"}, - {"t": "Str", "c": "{#a"}, - {"t": "Space"}, - {"t": "Str", "c": "b="}, - {"t": "Math", "c": [ - {"t": "InlineMath"}, "2"]}, - {"t": "Str", "c": "}"}, - {"t": "Space"}, - {"t": "Math", "c": [ - {"t": "InlineMath"}, "g=3"]}, - {"t": "Space"}, - {"t": "Str", "c": "{#gid}"}] - }, - {"t": "Para", "c": [ - {"t": "Image", "c": [ - ["", [], []], [{"t": "Str", "c": "a"}], - ["a/b.jpg", "fig:"]]}] - }] + { + "t": "Para", + "c": [ + {"t": "Math", "c": [{"t": "InlineMath"}, "a=1"]}, + {"t": "Space"}, + {"t": "Str", "c": "{#a"}, + {"t": "Space"}, + {"t": "Str", "c": "b="}, + {"t": "Math", "c": [{"t": "InlineMath"}, "2"]}, + {"t": "Str", "c": "}"}, + {"t": "Space"}, + {"t": "Math", "c": [{"t": "InlineMath"}, "g=3"]}, + {"t": "Space"}, + {"t": "Str", "c": "{#gid}"}, + ], + }, + { + "t": "Para", + "c": [ + { + "t": "Image", + "c": [ + ["", [], []], + [{"t": "Str", "c": "a"}], + ["a/b.jpg", "fig:"], + ], + } + ], + }, + ], } out_json = apply_to_json(in_json, main) - assert edict.diff(out_json, { - "pandoc-api-version": [ - 1, 17, 5, 1 - ], - "meta": { - "$$references": { - "t": "MetaMap", - "c": { - "a": { - "t": "MetaMap", - "c": { - "type": { - "t": "MetaString", - "c": "Math"}, - "number": { - "t": "MetaString", - "c": "1" - } - } - }, - "gid": { + assert ( + edict.diff( + out_json, + { + "pandoc-api-version": [1, 17, 5, 1], + "meta": { + "$$references": { "t": "MetaMap", "c": { - "type": { - "t": "MetaString", - "c": "Math"}, - "number": { - "t": "MetaString", - "c": "2" - }}}}} - }, - "blocks": [ - {"t": "Para", - "c": [ - {"t": "Span", + "a": 
{ + "t": "MetaMap", + "c": { + "type": {"t": "MetaString", "c": "Math"}, + "number": {"t": "MetaString", "c": "1"}, + }, + }, + "gid": { + "t": "MetaMap", + "c": { + "type": {"t": "MetaString", "c": "Math"}, + "number": {"t": "MetaString", "c": "2"}, + }, + }, + }, + } + }, + "blocks": [ + { + "t": "Para", "c": [ - ["a", ["labelled-Math"], [["b", "2"]]], - [{"t": "Math", - "c": [{"t": "InlineMath"}, "a=1"]}]] - }, - {"t": "Space"}, + { + "t": "Span", + "c": [ + ["a", ["labelled-Math"], [["b", "2"]]], + [{"t": "Math", "c": [{"t": "InlineMath"}, "a=1"]}], + ], + }, + {"t": "Space"}, + { + "t": "Span", + "c": [ + ["gid", ["labelled-Math"], []], + [{"t": "Math", "c": [{"t": "InlineMath"}, "g=3"]}], + ], + }, + ], + }, { - "t": "Span", - "c": [["gid", ["labelled-Math"], []], - [{"t": "Math", - "c": [{"t": "InlineMath"}, "g=3"]}]] - }] - }, - {"t": "Para", - "c": [ - {"t": "Image", - "c": [["", [], []], - [{"t": "Str", "c": "a"}], - ["a/b.jpg", "fig:"]] - } - ] - }] - }) == {} + "t": "Para", + "c": [ + { + "t": "Image", + "c": [ + ["", [], []], + [{"t": "Str", "c": "a"}], + ["a/b.jpg", "fig:"], + ], + } + ], + }, + ], + }, + ) + == {} + ) def test_with_tables(): @@ -213,96 +242,167 @@ def test_with_tables(): Table: Caption. {#tbl:id} """ in_json = { - "pandoc-api-version": [1, 17, 5, 1], "meta": {}, + "pandoc-api-version": [1, 17, 5, 1], + "meta": {}, "blocks": [ - {"t": "Para", "c": [ - {"t": "Str", "c": "Some"}, - {"t": "Space"}, - {"t": "Str", "c": "text"}]}, - {"t": "Table", "c": [ - [{"t": "Str", "c": "Caption."}, - {"t": "Space"}, - {"t": "Str", "c": "{#tbl:id}"}], - [{"t": "AlignDefault"}, - {"t": "AlignDefault"}, - {"t": "AlignDefault"}], - [0, 0, 0], - [[{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "c"}]}]], - [[[{"t": "Plain", "c": [{"t": "Str", "c": "1"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "2"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "3"}]}]], - [[{"t": "Plain", "c": [{"t": "Str", "c": "4"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "5"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "6"}]}]]]]}], + { + "t": "Para", + "c": [ + {"t": "Str", "c": "Some"}, + {"t": "Space"}, + {"t": "Str", "c": "text"}, + ], + }, + { + "t": "Table", + "c": [ + [ + {"t": "Str", "c": "Caption."}, + {"t": "Space"}, + {"t": "Str", "c": "{#tbl:id}"}, + ], + [ + {"t": "AlignDefault"}, + {"t": "AlignDefault"}, + {"t": "AlignDefault"}, + ], + [0, 0, 0], + [ + [{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "c"}]}], + ], + [ + [ + [{"t": "Plain", "c": [{"t": "Str", "c": "1"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "2"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "3"}]}], + ], + [ + [{"t": "Plain", "c": [{"t": "Str", "c": "4"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "5"}]}], + [{"t": "Plain", "c": [{"t": "Str", "c": "6"}]}], + ], + ], + ], + }, + ], } out_json = apply_to_json(in_json, main) - assert edict.diff( - out_json, - { - "pandoc-api-version": [1, 17, 5, 1], - "meta": { - "$$references": { - "t": "MetaMap", - "c": { - "tbl:id": { - "t": "MetaMap", - "c": { - "type": { - "t": "MetaString", - "c": "Table" + assert ( + edict.diff( + out_json, + { + "pandoc-api-version": [1, 17, 5, 1], + "meta": { + "$$references": { + "t": "MetaMap", + "c": { + "tbl:id": { + "t": "MetaMap", + "c": { + "type": {"t": "MetaString", "c": "Table"}, + "number": {"t": "MetaString", "c": "1"}, }, - "number": { - "t": 
"MetaString", - "c": "1" - } } - }} - } - }, - "blocks": - [ - {"t": "Para", "c": [ - {"t": "Str", "c": "Some"}, - {"t": "Space"}, - {"t": "Str", "c": "text"} - ] + }, + } }, - {"t": "Div", "c": - [ - ["tbl:id", ["labelled-Table"], []], - [{"t": "Table", - "c": [ - [{"t": "Str", "c": "Caption."}, - {"t": "Space"} + "blocks": [ + { + "t": "Para", + "c": [ + {"t": "Str", "c": "Some"}, + {"t": "Space"}, + {"t": "Str", "c": "text"}, + ], + }, + { + "t": "Div", + "c": [ + ["tbl:id", ["labelled-Table"], []], + [ + { + "t": "Table", + "c": [ + [{"t": "Str", "c": "Caption."}, {"t": "Space"}], + [ + {"t": "AlignDefault"}, + {"t": "AlignDefault"}, + {"t": "AlignDefault"}, + ], + [0, 0, 0], + [ + [ + { + "t": "Plain", + "c": [{"t": "Str", "c": "a"}], + } + ], + [ + { + "t": "Plain", + "c": [{"t": "Str", "c": "b"}], + } + ], + [ + { + "t": "Plain", + "c": [{"t": "Str", "c": "c"}], + } + ], + ], + [ + [ + [ + { + "t": "Plain", + "c": [{"t": "Str", "c": "1"}], + } + ], + [ + { + "t": "Plain", + "c": [{"t": "Str", "c": "2"}], + } + ], + [ + { + "t": "Plain", + "c": [{"t": "Str", "c": "3"}], + } + ], + ], + [ + [ + { + "t": "Plain", + "c": [{"t": "Str", "c": "4"}], + } + ], + [ + { + "t": "Plain", + "c": [{"t": "Str", "c": "5"}], + } + ], + [ + { + "t": "Plain", + "c": [{"t": "Str", "c": "6"}], + } + ], + ], + ], + ], + } ], - [ - {"t": "AlignDefault"}, - {"t": "AlignDefault"}, - {"t": "AlignDefault"} - ], - [0, 0, 0], - [[{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}], - [{"t": "Plain", "c": [{"t": "Str", "c": "c"}]}]], - [[[{"t": "Plain", - "c": [{"t": "Str", "c": "1"}]}], - [{"t": "Plain", - "c": [{"t": "Str", "c": "2"}]}], - [{"t": "Plain", - "c": [{"t": "Str", "c": "3"}]}]], - [[{"t": "Plain", - "c": [{"t": "Str", "c": "4"}]}], - [{"t": "Plain", - "c": [{"t": "Str", "c": "5"}]}], - [{"t": "Plain", - "c": [{"t": "Str", "c": "6"}]}]] - ] - ] - } - ]] - } - ]}) == {} + ], + }, + ], + }, + ) + == {} + ) diff --git a/ipypublish/filters_pandoc/utils.py b/ipypublish/filters_pandoc/utils.py index 3a0cbc9..8ddd5e9 100644 --- a/ipypublish/filters_pandoc/utils.py +++ b/ipypublish/filters_pandoc/utils.py @@ -15,11 +15,17 @@ from ipypublish.filters_pandoc.definitions import IPUB_META_ROUTE -def apply_filter(in_object, filter_func=None, - out_format="panflute", in_format="markdown", - strip_meta=False, strip_blank_lines=False, - replace_api_version=True, dry_run=False, - **kwargs): +def apply_filter( + in_object, + filter_func=None, + out_format="panflute", + in_format="markdown", + strip_meta=False, + strip_blank_lines=False, + replace_api_version=True, + dry_run=False, + **kwargs +): # type: (list[str], FunctionType) -> str """convenience function to apply a panflute filter(s) to a string, list of string lines, pandoc AST or panflute.Doc @@ -56,17 +62,15 @@ def apply_filter(in_object, filter_func=None, pass elif isinstance(in_object, dict): if not in_format == "json": - raise AssertionError("the in_format for a dict should be json, " - "not {}".format(in_format)) + raise AssertionError( + "the in_format for a dict should be json, " "not {}".format(in_format) + ) if "meta" not in in_object: - raise ValueError( - "the in_object does contain a 'meta' key") + raise ValueError("the in_object does contain a 'meta' key") if "blocks" not in in_object: - raise ValueError( - "the in_object does contain a 'blocks' key") + raise ValueError("the in_object does contain a 'blocks' key") if "pandoc-api-version" not in in_object: - raise ValueError( - "the in_object does contain 
a 'pandoc-api-version' key") + raise ValueError("the in_object does contain a 'pandoc-api-version' key") if replace_api_version: # run pandoc on a null object, to get the correct api version null_raw = pf.run_pandoc("", args=["-t", "json"]) @@ -75,13 +79,12 @@ def apply_filter(in_object, filter_func=None, # see panflute.load, w.r.t to legacy version if api_version is None: - in_object = [{'unMeta': in_object["meta"]}, - in_object["blocks"]] + in_object = [{"unMeta": in_object["meta"]}, in_object["blocks"]] else: ans = OrderedDict() - ans['pandoc-api-version'] = api_version - ans['meta'] = in_object["meta"] - ans['blocks'] = in_object["blocks"] + ans["pandoc-api-version"] = api_version + ans["meta"] = in_object["meta"] + ans["blocks"] = in_object["blocks"] in_object = ans in_str = json.dumps(in_object) elif isinstance(in_object, (list, tuple)): @@ -92,8 +95,7 @@ def apply_filter(in_object, filter_func=None, raise TypeError("object not accepted: {}".format(in_object)) if not isinstance(in_object, pf.Doc): - doc = pf.convert_text( - in_str, input_format=in_format, standalone=True) + doc = pf.convert_text(in_str, input_format=in_format, standalone=True) # f = io.StringIO(in_json) # doc = pf.load(f) else: @@ -122,9 +124,9 @@ def apply_filter(in_object, filter_func=None, # pf.dump(doc, f) # jsonstr = f.getvalue() # jsonstr = json.dumps(out_doc.to_json() - out_str = pf.convert_text(out_doc, - input_format="panflute", - output_format=out_format) + out_str = pf.convert_text( + out_doc, input_format="panflute", output_format=out_format + ) # post-process final str if strip_blank_lines: @@ -176,8 +178,9 @@ def strip_quotes(string): return string -def find_attributes(element, allow_space=True, - search_left=False, include_element=False): +def find_attributes( + element, allow_space=True, search_left=False, include_element=False +): """find an attribute 'container' for an element, of the form {#id .class1 .class2 a=1 b="a string"} and extract its content @@ -221,22 +224,26 @@ def _search_attribute_right(element, include_element, allow_space): found_start = False found_end = False while adjacent: - if (isinstance(adjacent, pf.Space) and allow_space): + if isinstance(adjacent, pf.Space) and allow_space: attr_elements.append(adjacent) adjacent = adjacent.next continue - elif (isinstance(adjacent, pf.Str) - # and adjacent.text.startswith("{") - # and adjacent.text.endswith("}")): - and re.search(r'^\{[^}]*\}', adjacent.text)): + elif ( + isinstance(adjacent, pf.Str) + # and adjacent.text.startswith("{") + # and adjacent.text.endswith("}")): + and re.search(r"^\{[^}]*\}", adjacent.text) + ): # TODO this won't handle } in strings, e.g. {a="} "} found_start = True found_end = True attr_elements.append(adjacent) break - elif (isinstance(adjacent, pf.Str) - # and adjacent.text.startswith("{")): - and re.search(r'^[^\}]*\{', adjacent.text)): + elif ( + isinstance(adjacent, pf.Str) + # and adjacent.text.startswith("{")): + and re.search(r"^[^\}]*\{", adjacent.text) + ): found_start = True found_end = False attr_elements.append(adjacent) @@ -247,9 +254,11 @@ def _search_attribute_right(element, include_element, allow_space): if found_start and not found_end: adjacent = adjacent.next while adjacent: - if (isinstance(adjacent, pf.Str) - # and adjacent.text.endswith("}")): - and re.search(r'^[^\{]*\}', adjacent.text)): + if ( + isinstance(adjacent, pf.Str) + # and adjacent.text.endswith("}")): + and re.search(r"^[^\{]*\}", adjacent.text) + ): # TODO this won't handle } in strings, e.g. 
{a="} "} found_end = True attr_elements.append(adjacent) @@ -261,8 +270,7 @@ def _search_attribute_right(element, include_element, allow_space): if not (found_start and found_end): return None - attribute_str = pf.stringify( - pf.Para(*attr_elements)).replace("\n", " ").strip() + attribute_str = pf.stringify(pf.Para(*attr_elements)).replace("\n", " ").strip() # split into the label and the rest match = re.match(r"^\{(#[^\s]+|)([^\}]*)\}", attribute_str) @@ -270,14 +278,14 @@ def _search_attribute_right(element, include_element, allow_space): raise ValueError(attribute_str) classes, attributes = process_attributes(match.group(2)) - new_str = attribute_str[len(match.group(0)):] + new_str = attribute_str[len(match.group(0)) :] return { "id": match.group(1)[1:], "classes": classes, "attributes": attributes, "elements": attr_elements, - "append": pf.Str(new_str) if new_str else None + "append": pf.Str(new_str) if new_str else None, } @@ -294,21 +302,22 @@ def _search_attribute_left(element, include_element, allow_space): found_start = False found_end = False while adjacent: - if (isinstance(adjacent, pf.Space) and allow_space): + if isinstance(adjacent, pf.Space) and allow_space: attr_elements.append(adjacent) adjacent = adjacent.prev continue - elif (isinstance(adjacent, pf.Str) - and adjacent.text.endswith("}") - and adjacent.text.startswith("{")): + elif ( + isinstance(adjacent, pf.Str) + and adjacent.text.endswith("}") + and adjacent.text.startswith("{") + ): # TODO this won't handle } in strings, e.g. {a="} "} # TODO this won't handle characters after } e.g. {a=1}) found_start = True found_end = True attr_elements.append(adjacent) break - elif (isinstance(adjacent, pf.Str) - and adjacent.text.endswith("}")): + elif isinstance(adjacent, pf.Str) and adjacent.text.endswith("}"): found_start = False found_end = True attr_elements.append(adjacent) @@ -319,8 +328,7 @@ def _search_attribute_left(element, include_element, allow_space): if found_end and not found_start: adjacent = adjacent.prev while adjacent: - if (isinstance(adjacent, pf.Str) - and adjacent.text.startswith("{")): + if isinstance(adjacent, pf.Str) and adjacent.text.startswith("{"): # TODO this won't handle { in strings, e.g. {a="{ "} # TODO this won't handle characters before { e.g. 
({a=1} found_start = True @@ -335,8 +343,7 @@ def _search_attribute_left(element, include_element, allow_space): attr_elements = list(reversed(attr_elements)) - attribute_str = pf.stringify( - pf.Para(*attr_elements)).replace("\n", " ").strip() + attribute_str = pf.stringify(pf.Para(*attr_elements)).replace("\n", " ").strip() # split into the label and the rest match = re.match("^\\{(#[^\\s]+|)([^\\}]*)\\}$", attribute_str) @@ -349,7 +356,7 @@ def _search_attribute_left(element, include_element, allow_space): "classes": classes, "attributes": attributes, "elements": attr_elements, - "append": None + "append": None, } @@ -363,12 +370,15 @@ def process_attributes(attr_string): dict: attributes """ # find classes, denoted by .class-name - classes = [c[1][1:] for c in re.findall('(^|\\s)(\\.[\\-\\_a-zA-Z]+)', - attr_string)] + classes = [c[1][1:] for c in re.findall("(^|\\s)(\\.[\\-\\_a-zA-Z]+)", attr_string)] # find attributes, denoted by a=b, respecting quotes - attr = {c[1]: strip_quotes(c[2]) for c in re.findall( - '(^|\\s)([\\-\\_a-zA-Z]+)\\s*=\\s*(\\".+\\"|\\\'.+\\\'|[^\\s\\"\\\']+)', # noqa: E501 - attr_string)} + attr = { + c[1]: strip_quotes(c[2]) + for c in re.findall( + "(^|\\s)([\\-\\_a-zA-Z]+)\\s*=\\s*(\\\".+\\\"|\\'.+\\'|[^\\s\\\"\\']+)", # noqa: E501 + attr_string, + ) + } # TODO this generally works, but should be stricter against any weird # fringe cases @@ -389,11 +399,9 @@ def convert_attributes(attr): def convert_units(string, out_units): - match = re.compile( - "^\\s*([0-9]+\\.?[0-9]*)([a-z\\%]*)\\s*$").match(str(string)) + match = re.compile("^\\s*([0-9]+\\.?[0-9]*)([a-z\\%]*)\\s*$").match(str(string)) if match is None: - raise ValueError( - "string could not be resolved as a value: {}".format(string)) + raise ValueError("string could not be resolved as a value: {}".format(string)) value = float(match.group(1)) in_units = match.group(2) in_units = "fraction" if not in_units else in_units @@ -402,19 +410,20 @@ def convert_units(string, out_units): return value convert = { - ("%", "fraction"): lambda x: x / 100., - ("fraction", "%"): lambda x: x*100. 
+ ("%", "fraction"): lambda x: x / 100.0, + ("fraction", "%"): lambda x: x * 100.0, }.get((in_units, out_units), None) if convert is None: - raise ValueError("could not find a conversion for " - "{0} to {1}: {2}".format(in_units, out_units, string)) + raise ValueError( + "could not find a conversion for " + "{0} to {1}: {2}".format(in_units, out_units, string) + ) return convert(value) -def get_option(locations, keypath, default=None, - delimiter=".", error_on_missing=False): +def get_option(locations, keypath, default=None, delimiter=".", error_on_missing=False): """ fetch an option variable from a hierarchy of preferred locations The value returned will be from the first available location or the default @@ -471,8 +480,7 @@ def get_option(locations, keypath, default=None, if found_var: return variable elif error_on_missing: - raise ValueError( - "could not retrieve the option keypath: {}".format(keypath)) + raise ValueError("could not retrieve the option keypath: {}".format(keypath)) return default @@ -504,7 +512,7 @@ def get_panflute_containers(element): pf.Strikeout, pf.Strong, pf.Subscript, - pf.Superscript + pf.Superscript, ) panflute_block_containers = ( @@ -514,7 +522,7 @@ def get_panflute_containers(element): pf.Doc, pf.ListItem, pf.Note, - pf.TableCell + pf.TableCell, ) if issubclass(element, pf.Inline): @@ -545,7 +553,7 @@ def get_pf_content_attr(container, target): pf.Subscript, pf.Superscript, pf.Table, - pf.DefinitionItem + pf.DefinitionItem, ] panflute_block_containers = ( @@ -555,7 +563,7 @@ def get_pf_content_attr(container, target): pf.Doc, pf.ListItem, pf.Note, - pf.TableCell + pf.TableCell, ) if issubclass(target, pf.Cite): diff --git a/ipypublish/frontend/nbpresent.py b/ipypublish/frontend/nbpresent.py index 14c5e28..d5019a9 100644 --- a/ipypublish/frontend/nbpresent.py +++ b/ipypublish/frontend/nbpresent.py @@ -11,13 +11,18 @@ logger = logging.getLogger("nbpresent") -def nbpresent(inpath, - outformat='slides_standard', - outpath=None, dump_files=True, - ignore_prefix='_', clear_files=False, - log_level='INFO', dry_run=False, - print_traceback=False, - export_paths=()): +def nbpresent( + inpath, + outformat="slides_standard", + outpath=None, + dump_files=True, + ignore_prefix="_", + clear_files=False, + log_level="INFO", + dry_run=False, + print_traceback=False, + export_paths=(), +): """ load reveal.js slides as a web server, converting from ipynb first if path extension is .ipynb @@ -42,27 +47,29 @@ def nbpresent(inpath, inpath_name, inpath_ext = os.path.splitext(os.path.basename(inpath)) output_mimetype = guess_type(inpath, strict=False)[0] - output_mimetype = 'unknown' if output_mimetype is None else output_mimetype + output_mimetype = "unknown" if output_mimetype is None else output_mimetype if output_mimetype != "text/html": - config = {"IpyPubMain": { - "conversion": outformat, - "plugin_folder_paths": export_paths, - "outpath": outpath, - "ignore_prefix": ignore_prefix, - "log_to_stdout": True, - "log_level_stdout": log_level, - "log_to_file": True, - "log_level_file": log_level, - "default_pporder_kwargs": dict( - dry_run=dry_run, - clear_existing=clear_files, - dump_files=dump_files, - serve_html=True, - slides=True - ) - }} + config = { + "IpyPubMain": { + "conversion": outformat, + "plugin_folder_paths": export_paths, + "outpath": outpath, + "ignore_prefix": ignore_prefix, + "log_to_stdout": True, + "log_level_stdout": log_level, + "log_to_file": True, + "log_level_file": log_level, + "default_pporder_kwargs": dict( + dry_run=dry_run, + 
clear_existing=clear_files, + dump_files=dump_files, + serve_html=True, + slides=True, + ), + } + } publish = IpyPubMain(config=config) try: outdata = publish(inpath) diff --git a/ipypublish/frontend/nbpublish.py b/ipypublish/frontend/nbpublish.py index 837b38c..b80c6c3 100644 --- a/ipypublish/frontend/nbpublish.py +++ b/ipypublish/frontend/nbpublish.py @@ -8,16 +8,22 @@ logger = logging.getLogger("nbpublish") -def nbpublish(ipynb_path, - outformat='latex_ipypublish_main', - outpath=None, dump_files=True, - ignore_prefix='_', clear_files=False, - create_pdf=False, - pdf_in_temp=False, pdf_debug=False, - launch_browser=False, - log_level='INFO', dry_run=False, - print_traceback=False, - export_paths=()): +def nbpublish( + ipynb_path, + outformat="latex_ipypublish_main", + outpath=None, + dump_files=True, + ignore_prefix="_", + clear_files=False, + create_pdf=False, + pdf_in_temp=False, + pdf_debug=False, + launch_browser=False, + log_level="INFO", + dry_run=False, + print_traceback=False, + export_paths=(), +): """ convert one or more Jupyter notebooks to a published format paths can be string of an existing file or folder, @@ -49,27 +55,29 @@ def nbpublish(ipynb_path, """ # run - config = {"IpyPubMain": { - "conversion": outformat, - "plugin_folder_paths": export_paths, - "outpath": outpath, - "ignore_prefix": ignore_prefix, - "log_to_stdout": True, - "log_level_stdout": log_level, - "log_to_file": True, - "log_level_file": log_level, - "default_pporder_kwargs": dict( - dry_run=dry_run, - clear_existing=clear_files, - dump_files=dump_files, - create_pdf=create_pdf, - ), - "default_ppconfig_kwargs": dict( - pdf_in_temp=pdf_in_temp, - pdf_debug=pdf_debug, - launch_browser=launch_browser - ) - }} + config = { + "IpyPubMain": { + "conversion": outformat, + "plugin_folder_paths": export_paths, + "outpath": outpath, + "ignore_prefix": ignore_prefix, + "log_to_stdout": True, + "log_level_stdout": log_level, + "log_to_file": True, + "log_level_file": log_level, + "default_pporder_kwargs": dict( + dry_run=dry_run, + clear_existing=clear_files, + dump_files=dump_files, + create_pdf=create_pdf, + ), + "default_ppconfig_kwargs": dict( + pdf_in_temp=pdf_in_temp, + pdf_debug=pdf_debug, + launch_browser=launch_browser, + ), + } + } publish = IpyPubMain(config=config) try: publish(ipynb_path) diff --git a/ipypublish/frontend/shared.py b/ipypublish/frontend/shared.py index 21f955d..0ac7446 100644 --- a/ipypublish/frontend/shared.py +++ b/ipypublish/frontend/shared.py @@ -7,24 +7,21 @@ from ipypublish.convert.config_manager import iter_all_export_infos -class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, - argparse.RawDescriptionHelpFormatter, - ): +class CustomFormatter( + argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter +): pass class CustomParser(argparse.ArgumentParser): def error(self, message): - sys.stderr.write('error: %s\n' % message) + sys.stderr.write("error: %s\n" % message) self.print_help() sys.exit(2) def get_parser(**kwargs): - return CustomParser( - formatter_class=CustomFormatter, - **kwargs - ) + return CustomParser(formatter_class=CustomFormatter, **kwargs) def get_plugin_str(plugin_folder_paths, regex, verbose): @@ -32,9 +29,11 @@ def get_plugin_str(plugin_folder_paths, regex, verbose): outstrs = [] # outstrs.append('Available Export Configurations') # outstrs.append('-------------------------------') - configs = [e for e in iter_all_export_infos( - plugin_folder_paths, get_mime=verbose) - if fnmatch.fnmatch(e["key"], "*{}*".format(regex))] + 
configs = [ + e + for e in iter_all_export_infos(plugin_folder_paths, get_mime=verbose) + if fnmatch.fnmatch(e["key"], "*{}*".format(regex)) + ] for item in sorted(configs, key=lambda i: (i["class"], i["key"])): outstrs.append("- Key: {}".format(item["key"])) @@ -68,95 +67,161 @@ def parse_options(sys_args, program): if program == "nbpresent": parser = get_parser( - description=('load reveal.js slides as a web server,\n' - 'converting from ipynb first ' - 'if path extension is `ipynb`')) - file_help = 'path to html or ipynb file' - default_key = 'slides_ipypublish_main' + description=( + "load reveal.js slides as a web server,\n" + "converting from ipynb first " + "if path extension is `ipynb`" + ) + ) + file_help = "path to html or ipynb file" + default_key = "slides_ipypublish_main" else: parser = get_parser( - description=('convert one or more Jupyter notebooks ' - 'to a publishable format')) - file_help = 'notebook file or directory' - default_key = 'latex_ipypublish_main' - - parser.add_argument('--version', action='version', version=__version__) - - parser.add_argument("filepath", type=str, nargs='?', - help=file_help, - metavar='filepath') - - parser.add_argument("-f", "--outformat", type=str, - metavar='key|filepath', - help=('export format configuration to use, ' - 'can be a key name or path to the file'), - default=default_key) - - export_group = parser.add_argument_group('export configurations') - export_group.add_argument("-ep", "--export-paths", - action='append', metavar='path', type=str, - help=("add additional folder paths, " - "containing export configurations"), - default=[]) - export_group.add_argument("-le", "--list-exporters", type=str, - metavar='filter', nargs='?', const='*', - help=("list export configurations, " - "optionally filtered e.g. -le html*")) - export_group.add_argument("-lv", "--list-verbose", action="store_true", - help=("when listing export configurations, " - "give a verbose description")) - - nbmerge_group = parser.add_argument_group('nb merge') - nbmerge_group.add_argument("-i", "--ignore-prefix", - type=str, metavar='str', default='_', - help='ignore ipynb files with this prefix') - - output_group = parser.add_argument_group('output') - output_group.add_argument("-o", "--outpath", type=str, metavar='str', - help='path to output converted files', - default=os.path.join(os.getcwd(), 'converted')) + description=( + "convert one or more Jupyter notebooks " "to a publishable format" + ) + ) + file_help = "notebook file or directory" + default_key = "latex_ipypublish_main" + + parser.add_argument("--version", action="version", version=__version__) + + parser.add_argument( + "filepath", type=str, nargs="?", help=file_help, metavar="filepath" + ) + + parser.add_argument( + "-f", + "--outformat", + type=str, + metavar="key|filepath", + help=( + "export format configuration to use, " + "can be a key name or path to the file" + ), + default=default_key, + ) + + export_group = parser.add_argument_group("export configurations") + export_group.add_argument( + "-ep", + "--export-paths", + action="append", + metavar="path", + type=str, + help=("add additional folder paths, " "containing export configurations"), + default=[], + ) + export_group.add_argument( + "-le", + "--list-exporters", + type=str, + metavar="filter", + nargs="?", + const="*", + help=("list export configurations, " "optionally filtered e.g. 
-le html*"), + ) + export_group.add_argument( + "-lv", + "--list-verbose", + action="store_true", + help=("when listing export configurations, " "give a verbose description"), + ) + + nbmerge_group = parser.add_argument_group("nb merge") + nbmerge_group.add_argument( + "-i", + "--ignore-prefix", + type=str, + metavar="str", + default="_", + help="ignore ipynb files with this prefix", + ) + + output_group = parser.add_argument_group("output") + output_group.add_argument( + "-o", + "--outpath", + type=str, + metavar="str", + help="path to output converted files", + default=os.path.join(os.getcwd(), "converted"), + ) # output_group.add_argument("-d","--dump-files", action="store_true", # help='dump external files, ' # 'linked to in the document, into the outpath') - output_group.add_argument("-c", "--clear-files", action="store_true", - help=('clear any external files ' - 'that already exist in the outpath')) + output_group.add_argument( + "-c", + "--clear-files", + action="store_true", + help=("clear any external files " "that already exist in the outpath"), + ) if program == "nbpublish": - pdf_group = parser.add_argument_group('pdf export') - pdf_group.add_argument("-pdf", "--create-pdf", action="store_true", - help='convert to pdf (only if latex exporter)') - pdf_group.add_argument("-ptemp", "--pdf-in-temp", action="store_true", - help=('run pdf conversion in a temporary folder' - ' and only copy back the .pdf file')) - pdf_group.add_argument("-pbug", "--pdf-debug", action="store_true", - help='run latexmk in interactive mode') - - view_group = parser.add_argument_group('view output') + pdf_group = parser.add_argument_group("pdf export") + pdf_group.add_argument( + "-pdf", + "--create-pdf", + action="store_true", + help="convert to pdf (only if latex exporter)", + ) + pdf_group.add_argument( + "-ptemp", + "--pdf-in-temp", + action="store_true", + help=( + "run pdf conversion in a temporary folder" + " and only copy back the .pdf file" + ), + ) + pdf_group.add_argument( + "-pbug", + "--pdf-debug", + action="store_true", + help="run latexmk in interactive mode", + ) + + view_group = parser.add_argument_group("view output") view_group.add_argument( - "-lb", "--launch-browser", action="store_true", - help='open the output in an available web-browser') - - debug_group = parser.add_argument_group('debugging') - debug_group.add_argument("-log", "--log-level", type=str, default='info', - choices=['debug', 'info', 'warning', 'error'], - help='the logging level to output to screen/file') - debug_group.add_argument("-pt", "--print-traceback", action="store_true", - help=("print the full exception traceback")) - debug_group.add_argument("-dr", "--dry-run", action="store_true", - help=("perform a 'dry run', " - "which will not output any files")) + "-lb", + "--launch-browser", + action="store_true", + help="open the output in an available web-browser", + ) + + debug_group = parser.add_argument_group("debugging") + debug_group.add_argument( + "-log", + "--log-level", + type=str, + default="info", + choices=["debug", "info", "warning", "error"], + help="the logging level to output to screen/file", + ) + debug_group.add_argument( + "-pt", + "--print-traceback", + action="store_true", + help=("print the full exception traceback"), + ) + debug_group.add_argument( + "-dr", + "--dry-run", + action="store_true", + help=("perform a 'dry run', " "which will not output any files"), + ) args = parser.parse_args(sys_args) options = vars(args) - filepath = options.pop('filepath') + filepath = options.pop("filepath") 
list_plugins = options.pop("list_exporters") list_verbose = options.pop("list_verbose") if filepath is None and list_plugins: - parser.exit(message=get_plugin_str( - options["export_paths"], list_plugins, list_verbose)) + parser.exit( + message=get_plugin_str(options["export_paths"], list_plugins, list_verbose) + ) elif filepath is None: parser.error("no filepath specified") diff --git a/ipypublish/port_api/convert_format_str.py b/ipypublish/port_api/convert_format_str.py index 020e604..d3c342a 100644 --- a/ipypublish/port_api/convert_format_str.py +++ b/ipypublish/port_api/convert_format_str.py @@ -2,12 +2,10 @@ class DefaultFormatter(string.Formatter): - def get_value(self, key, args, kwargs): # Handle a key not found try: - val = super(DefaultFormatter, self).get_value( - key, args, kwargs) + val = super(DefaultFormatter, self).get_value(key, args, kwargs) # Python 3, 'super().get_field(field_name, args, kwargs)' works except (IndexError, KeyError): if str(key).endswith("_pre"): @@ -28,22 +26,23 @@ def convert_format_str(template): if __name__ == "__main__": - template = ["{{%- extends 'null.tpl' -%}}", - "{{% block header %}}", - "{{{{ nb.metadata | meta2yaml('#~~ ') }}}}", - "{{% endblock header %}}", - "{{% block codecell %}}", - "#%%", - "{{{{ super() }}}}", - "{{% endblock codecell %}}", - "{{% block in_prompt %}}{{% endblock in_prompt %}}", - "{{% block input %}}{{{{ cell.metadata | meta2yaml('#~~ ') }}}}", # noqa: E501 - "{{{{ cell.source | ipython2python }}}}", - "{{% endblock input %}}", - "{{% block markdowncell scoped %}}#%% [markdown]", - "{{{{ cell.metadata | meta2yaml('#~~ ') }}}}", - "{{{{ cell.source | comment_lines }}}}", - "{{% endblock markdowncell %}}" - ] + template = [ + "{{%- extends 'null.tpl' -%}}", + "{{% block header %}}", + "{{{{ nb.metadata | meta2yaml('#~~ ') }}}}", + "{{% endblock header %}}", + "{{% block codecell %}}", + "#%%", + "{{{{ super() }}}}", + "{{% endblock codecell %}}", + "{{% block in_prompt %}}{{% endblock in_prompt %}}", + "{{% block input %}}{{{{ cell.metadata | meta2yaml('#~~ ') }}}}", # noqa: E501 + "{{{{ cell.source | ipython2python }}}}", + "{{% endblock input %}}", + "{{% block markdowncell scoped %}}#%% [markdown]", + "{{{{ cell.metadata | meta2yaml('#~~ ') }}}}", + "{{{{ cell.source | comment_lines }}}}", + "{{% endblock markdowncell %}}", + ] print(convert_format_str(template)) diff --git a/ipypublish/port_api/plugin_to_json.py b/ipypublish/port_api/plugin_to_json.py index 8e67852..aa7f362 100644 --- a/ipypublish/port_api/plugin_to_json.py +++ b/ipypublish/port_api/plugin_to_json.py @@ -18,9 +18,7 @@ def assess_syntax(path): imported = {} assignments = {} for i, child in enumerate(ast.iter_child_nodes(syntax_tree)): - if (i == 0 - and isinstance(child, ast.Expr) - and isinstance(child.value, ast.Str)): + if i == 0 and isinstance(child, ast.Expr) and isinstance(child.value, ast.Str): docstring = child.value.s elif isinstance(child, ast.ImportFrom): module = child.module @@ -31,16 +29,18 @@ def assess_syntax(path): targets = child.targets if len(targets) > 1: raise IOError( - "cannot handle expansion assignments " - "(e.g. `a, b = [1, 2]`)") + "cannot handle expansion assignments " "(e.g. 
`a, b = [1, 2]`)" + ) target = child.targets[0] # type: ast.Name assignments[target.id] = child.value else: unknowns.append(child) if unknowns: - print("Warning this script can only handle 'ImportFrom' and 'Assign' " - "syntax, found additional items: {}".format(unknowns)) + print( + "Warning this script can only handle 'ImportFrom' and 'Assign' " + "syntax, found additional items: {}".format(unknowns) + ) return docstring, imported, assignments @@ -48,8 +48,8 @@ def assess_syntax(path): def ast_to_json(item, imported, assignments): """recursively convert ast items to json friendly values""" value = None - if item in ['True', 'False', 'None']: # python 2.7 - value = {'True': True, 'False': False, 'None': None}[item] + if item in ["True", "False", "None"]: # python 2.7 + value = {"True": True, "False": False, "None": None}[item] elif hasattr(ast, "NameConstant") and isinstance(item, ast.NameConstant): value = item.value elif isinstance(item, ast.Str): @@ -61,11 +61,10 @@ def ast_to_json(item, imported, assignments): value = imported[item.id] elif item.id in assignments: value = ast_to_json(assignments[item.id], imported, assignments) - elif item.id in ['True', 'False', 'None']: # python 2.7 - value = {'True': True, 'False': False, 'None': None}[item.id] + elif item.id in ["True", "False", "None"]: # python 2.7 + value = {"True": True, "False": False, "None": None}[item.id] else: - raise ValueError( - "could not find assignment '{}' in config".format(item.id)) + raise ValueError("could not find assignment '{}' in config".format(item.id)) elif isinstance(item, (ast.List, ast.Tuple, ast.Set)): value = [ast_to_json(i, imported, assignments) for i in item.elts] elif isinstance(item, ast.Dict): @@ -82,8 +81,7 @@ def convert_dict(dct, imported, assignments): out_dict = {} for key, val in zip(dct.keys, dct.values): if not isinstance(key, ast.Str): - raise ValueError( - "expected key to be a Str; {}".format(key)) + raise ValueError("expected key to be a Str; {}".format(key)) out_dict[key.s] = ast_to_json(val, imported, assignments) return out_dict @@ -93,28 +91,29 @@ def convert_oformat(oformat): if oformat == "Notebook": outline = None # TODO do notebooks need template (they have currently) - exporter = 'nbconvert.exporters.NotebookExporter' + exporter = "nbconvert.exporters.NotebookExporter" elif oformat == "Latex": - exporter = 'nbconvert.exporters.LatexExporter' + exporter = "nbconvert.exporters.LatexExporter" outline = { "module": "ipypublish.templates.outline_schemas", - "file": "latex_outline.latex.j2" + "file": "latex_outline.latex.j2", } elif oformat == "HTML": - exporter = 'nbconvert.exporters.HTMLExporter' + exporter = "nbconvert.exporters.HTMLExporter" outline = { "module": "ipypublish.templates.outline_schemas", - "file": "html_outline.html.j2" + "file": "html_outline.html.j2", } elif oformat == "Slides": - exporter = 'nbconvert.exporters.SlidesExporter' + exporter = "nbconvert.exporters.SlidesExporter" outline = { "module": "ipypublish.templates.outline_schemas", - "file": "html_outline.html.j2" + "file": "html_outline.html.j2", } else: - raise ValueError("expected oformat to be: " - "'Notebook', 'Latex', 'HTML' or 'Slides'") + raise ValueError( + "expected oformat to be: " "'Notebook', 'Latex', 'HTML' or 'Slides'" + ) return exporter, outline @@ -133,7 +132,8 @@ def convert_config(config, exporter_class, allow_other): if preprocs: raise ValueError( "'config' contains both Exporter.preprocessors and " - "TemplateExporter.preprocessors") + "TemplateExporter.preprocessors" + ) for p in 
val: pname = p.split(".")[-1] preprocs[pname] = {"class": p, "args": {}} @@ -144,12 +144,15 @@ def convert_config(config, exporter_class, allow_other): # second parse for key, val in config.items(): - if key in ["Exporter.filters", "TemplateExporter.filters", - "Exporter.preprocessors", "TemplateExporter.preprocessors"]: + if key in [ + "Exporter.filters", + "TemplateExporter.filters", + "Exporter.preprocessors", + "TemplateExporter.preprocessors", + ]: continue if key.split(".")[0] in preprocs: - preprocs[key.split(".")[0]]["args"][".".join( - key.split(".")[1:])] = val + preprocs[key.split(".")[0]]["args"][".".join(key.split(".")[1:])] = val else: other[key] = val @@ -161,7 +164,7 @@ def convert_config(config, exporter_class, allow_other): "class": exporter_class, "filters": filters, "preprocessors": list(preprocs.values()), - "other_args": other + "other_args": other, } return output @@ -174,29 +177,26 @@ def replace_template_path(path): if module == "ipypublish.html.ipypublish": return { "module": "ipypublish.templates.segments", - "file": "ipy-{0}.html-tplx.json".format(name) + "file": "ipy-{0}.html-tplx.json".format(name), } elif module == "ipypublish.html.standard": return { "module": "ipypublish.templates.segments", - "file": "std-{0}.html-tplx.json".format(name) + "file": "std-{0}.html-tplx.json".format(name), } elif module == "ipypublish.latex.standard": return { "module": "ipypublish.templates.segments", - "file": "std-{0}.latex-tpl.json".format(name) + "file": "std-{0}.latex-tpl.json".format(name), } elif module == "ipypublish.latex.ipypublish": return { "module": "ipypublish.templates.segments", - "file": "ipy-{0}.latex-tpl.json".format(name) + "file": "ipy-{0}.latex-tpl.json".format(name), } else: print("Warning: unknown template path: {}".format(path)) - return { - "module": module, - "file": "{0}.json".format(name) - } + return {"module": module, "file": "{0}.json".format(name)} def create_json(docstring, imported, assignments, allow_other=True): @@ -224,20 +224,17 @@ def create_json(docstring, imported, assignments, allow_other=True): config = None template = None for value, expr in assignments.items(): - if value == 'oformat': + if value == "oformat": if not isinstance(expr, ast.Str): - raise ValueError( - "expected 'oformat' to be a Str; {}".format(expr)) + raise ValueError("expected 'oformat' to be a Str; {}".format(expr)) oformat = expr.s elif value == "config": if not isinstance(expr, ast.Dict): - raise ValueError( - "expected 'config' to be a Dict; {}".format(expr)) + raise ValueError("expected 'config' to be a Dict; {}".format(expr)) config = convert_dict(expr, imported, assignments) elif value == "template": if not isinstance(expr, ast.Call): - raise ValueError( - "expected 'config' to be a call to create_tpl(x)") + raise ValueError("expected 'config' to be a call to create_tpl(x)") # func = expr.func # TODO make sure func name is create_tpl/tplx args = expr.args keywords = expr.keywords @@ -248,8 +245,8 @@ def create_json(docstring, imported, assignments, allow_other=True): seg_list = seg_list.generators[0].iter if not isinstance(seg_list, ast.List): raise ValueError( - "expected create_tpl(x) arg to be a List; {}".format( - seg_list)) + "expected create_tpl(x) arg to be a List; {}".format(seg_list) + ) segments = [] for seg in seg_list.elts: if isinstance(seg, ast.Attribute): @@ -258,8 +255,9 @@ def create_json(docstring, imported, assignments, allow_other=True): seg_name = seg.id else: raise ValueError( - "expected seg in template to be an Attribute; " + - 
"{1}".format(seg)) + "expected seg in template to be an Attribute; " + + "{1}".format(seg) + ) if seg_name not in imported: raise ValueError("segment '{}' not found".format(seg_name)) @@ -277,16 +275,17 @@ def create_json(docstring, imported, assignments, allow_other=True): exporter = convert_config(config, exporter_class, allow_other) if any(["biblio_natbib" in s for s in template]): - exporter["filters"]["strip_ext"] = ( - "ipypublish.filters.filters.strip_ext") + exporter["filters"]["strip_ext"] = "ipypublish.filters.filters.strip_ext" return { "description": docstring.splitlines(), "exporter": exporter, - "template": None if outline is None else { + "template": None + if outline is None + else { "outline": outline, - "segments": [replace_template_path(s) for s in template] - } + "segments": [replace_template_path(s) for s in template], + }, } @@ -324,19 +323,20 @@ def convert_to_json(path, outpath=None, ignore_other=False): if False: import glob import os + for path in glob.glob( - "/Users/cjs14/GitHub/ipypublish" - "/ipypublish/export_plugins/*.py"): + "/Users/cjs14/GitHub/ipypublish" "/ipypublish/export_plugins/*.py" + ): dirname = os.path.dirname(path) name = os.path.splitext(os.path.basename(path))[0] try: - convert_to_json(path, os.path.join(dirname, name + ".json"), - ignore_other=True) + convert_to_json( + path, os.path.join(dirname, name + ".json"), ignore_other=True + ) except ValueError as err: print("{0} failed: {1}".format(path, err)) convert_to_json( - "/Users/cjs14/GitHub/ipypublish" - "/ipypublish_plugins/example_new_plugin.py", - "/Users/cjs14/GitHub/ipypublish" - "/ipypublish_plugins/example_new_plugin.json") + "/Users/cjs14/GitHub/ipypublish" "/ipypublish_plugins/example_new_plugin.py", + "/Users/cjs14/GitHub/ipypublish" "/ipypublish_plugins/example_new_plugin.json", + ) diff --git a/ipypublish/port_api/tpl_dct_to_json.py b/ipypublish/port_api/tpl_dct_to_json.py index a257c91..2cf546b 100644 --- a/ipypublish/port_api/tpl_dct_to_json.py +++ b/ipypublish/port_api/tpl_dct_to_json.py @@ -18,9 +18,7 @@ def assess_syntax(path): dct = None dtype = None for i, child in enumerate(ast.iter_child_nodes(syntax_tree)): - if (i == 0 - and isinstance(child, ast.Expr) - and isinstance(child.value, ast.Str)): + if i == 0 and isinstance(child, ast.Expr) and isinstance(child.value, ast.Str): docstring = child.value.s elif isinstance(child, ast.Assign): targets = child.targets @@ -32,8 +30,8 @@ def assess_syntax(path): continue if not isinstance(child.value, ast.Dict): raise ValueError( - "expected {} to be of type Dict: {}".format( - dtype, child.value)) + "expected {} to be of type Dict: {}".format(dtype, child.value) + ) dct = child.value break @@ -43,21 +41,19 @@ def assess_syntax(path): output = {} for key, value in zip(dct.keys, dct.values): if not isinstance(key, ast.Str): - raise ValueError( - "expected {} key to be of type Str: {}".format( - dtype, key)) + raise ValueError("expected {} key to be of type Str: {}".format(dtype, key)) if not isinstance(value, ast.Str): raise ValueError( - "expected {} value be of type Str: {}".format( - dtype, value)) + "expected {} value be of type Str: {}".format(dtype, value) + ) output[key.s] = value.s return { "identifier": os.path.splitext(os.path.basename(path))[0], "description": docstring, "segments": output, - "$schema": "../../schema/segment.schema.json" - } + "$schema": "../../schema/segment.schema.json", + } def py_to_json(path, outpath=None): @@ -83,6 +79,6 @@ def py_to_json(path, outpath=None): else: _prefix = "std-" - _outpath = 
os.path.join(os.path.dirname(_path), _prefix+_name+_ext) + _outpath = os.path.join(os.path.dirname(_path), _prefix + _name + _ext) py_to_json(_path, _outpath) diff --git a/ipypublish/postprocessors/base.py b/ipypublish/postprocessors/base.py index 8ccdb51..ef1db85 100644 --- a/ipypublish/postprocessors/base.py +++ b/ipypublish/postprocessors/base.py @@ -28,7 +28,7 @@ def allowed_mimetypes(self): text/markdown, text/asciidoc, text/yaml """ - raise NotImplementedError('allowed_mimetypes') + raise NotImplementedError("allowed_mimetypes") @property def requires_path(self): @@ -41,7 +41,7 @@ def requires_path(self): will try to make the directory if it doesn't exist """ - raise NotImplementedError('requires_path') + raise NotImplementedError("requires_path") @property def logger_name(self): @@ -56,7 +56,8 @@ def logger(self): skip_mime = Bool( True, help="if False, raise a TypeError if the mimetype is not allowed, " - "else return without processing").tag(config=True) + "else return without processing", + ).tag(config=True) def __init__(self, config=None): super(IPyPostProcessor, self).__init__(config=config) @@ -67,8 +68,7 @@ def __call__(self, stream, mimetype, filepath, resources=None): """ self.postprocess(stream, mimetype, filepath, resources) - def postprocess(self, stream, mimetype, filepath, - resources=None): + def postprocess(self, stream, mimetype, filepath, resources=None): """ Post-process output. Parameters @@ -90,23 +90,26 @@ def postprocess(self, stream, mimetype, filepath, """ - if (self.allowed_mimetypes is not None - and mimetype not in self.allowed_mimetypes): + if ( + self.allowed_mimetypes is not None + and mimetype not in self.allowed_mimetypes + ): if not self.skip_mime: self.handle_error( "the mimetype {0} is not in the allowed list: {1}".format( - mimetype, self.allowed_mimetypes), - TypeError) + mimetype, self.allowed_mimetypes + ), + TypeError, + ) else: - self.logger.debug( - "skipping incorrect mime type: {}".format(mimetype)) + self.logger.debug("skipping incorrect mime type: {}".format(mimetype)) return stream, filepath, resources if self.requires_path and filepath is None: self.handle_error( - "the filepath is None, " - "but the post-processor requires a folder", - IOError) + "the filepath is None, " "but the post-processor requires a folder", + IOError, + ) if filepath is not None and isinstance(filepath, string_types): filepath = pathlib.Path(filepath) @@ -115,14 +118,14 @@ def postprocess(self, stream, mimetype, filepath, if not filepath.is_absolute(): self.handle_error( - "the post-processor requires an absolute folder path", - IOError) + "the post-processor requires an absolute folder path", IOError + ) if filepath.parent.exists() and not filepath.parent.is_dir(): self.handle_error( - "the filepath's parent is not a folder: {}".format( - filepath), - TypeError) + "the filepath's parent is not a folder: {}".format(filepath), + TypeError, + ) if not filepath.parent.exists(): filepath.parent.mkdir(parents=True) @@ -152,14 +155,12 @@ def run_postprocess(self, stream, mimetype, filepath, resources): resources: dict """ - raise NotImplementedError('run_postprocess') + raise NotImplementedError("run_postprocess") - def handle_error(self, msg, err_type, - raise_msg=None, log_msg=None): + def handle_error(self, msg, err_type, raise_msg=None, log_msg=None): """ handle error by logging it then raising """ - handle_error(msg, err_type, self.logger, - raise_msg=raise_msg, log_msg=log_msg) + handle_error(msg, err_type, self.logger, raise_msg=raise_msg, log_msg=log_msg) 
def check_exe_exists(self, name, error_msg): """ test if an executable exists diff --git a/ipypublish/postprocessors/convert_bibgloss.py b/ipypublish/postprocessors/convert_bibgloss.py index cd1f627..f35f8f0 100644 --- a/ipypublish/postprocessors/convert_bibgloss.py +++ b/ipypublish/postprocessors/convert_bibgloss.py @@ -8,6 +8,7 @@ class ConvertBibGloss(IPyPostProcessor): """ convert a bibglossary to the required format """ + @property def allowed_mimetypes(self): return None @@ -20,19 +21,15 @@ def requires_path(self): def logger_name(self): return "convert-bibgloss" - encoding = Unicode( - "utf8", - help="the encoding of the input file" - ).tag(config=True) + encoding = Unicode("utf8", help="the encoding of the input file").tag(config=True) resource_key = Unicode( "bibglosspath", - help="the key in the resources dict containing the path to the file" + help="the key in the resources dict containing the path to the file", ).tag(config=True) files_folder = Unicode( - "_static", - help="the path (relative to the main file path) to dump to" + "_static", help="the path (relative to the main file path) to dump to" ).tag(config=True) def run_postprocess(self, stream, mimetype, filepath, resources): @@ -45,7 +42,8 @@ def run_postprocess(self, stream, mimetype, filepath, resources): if not os.path.exists(str(bibpath)): self.logger.warning( "the bibglossary could not be converted, " - "since its path does not exist: {}".format(bibpath)) + "since its path does not exist: {}".format(bibpath) + ) return stream, filepath, resources bibname, extension = os.path.splitext(os.path.basename(bibpath)) @@ -76,7 +74,8 @@ def run_postprocess(self, stream, mimetype, filepath, resources): self.logger.warning( "the bibglossary could not be converted, " "since its file extension was not one of: " - "bib, tex") + "bib, tex" + ) if outstr is None: return stream, filepath, resources diff --git a/ipypublish/postprocessors/file_actions.py b/ipypublish/postprocessors/file_actions.py index 12615f1..4219d19 100644 --- a/ipypublish/postprocessors/file_actions.py +++ b/ipypublish/postprocessors/file_actions.py @@ -8,10 +8,17 @@ class WriteTextFile(IPyPostProcessor): """ write the stream to a text based file """ + @property def allowed_mimetypes(self): - return ("text/latex", "text/restructuredtext", "text/html", - "text/x-python", "application/json", "text/markdown") + return ( + "text/latex", + "text/restructuredtext", + "text/html", + "text/x-python", + "application/json", + "text/markdown", + ) @property def requires_path(self): @@ -21,14 +28,11 @@ def requires_path(self): def logger_name(self): return "write-text-file" - encoding = Unicode( - "utf8", - help="the encoding of the output file" - ).tag(config=True) + encoding = Unicode("utf8", help="the encoding of the output file").tag(config=True) def run_postprocess(self, stream, mimetype, filepath, resources): - self.logger.info('writing stream to file: {}'.format(filepath)) + self.logger.info("writing stream to file: {}".format(filepath)) with filepath.open("w", encoding=self.encoding) as fh: fh.write(stream) @@ -38,6 +42,7 @@ def run_postprocess(self, stream, mimetype, filepath, resources): class RemoveFolder(IPyPostProcessor): """ remove a folder and all its contents """ + @property def allowed_mimetypes(self): return None @@ -51,16 +56,14 @@ def logger_name(self): return "remove-folder" files_folder = Unicode( - "_static", - help="the path (relative to the main file path) to remove" + "_static", help="the path (relative to the main file path) to remove" 
).tag(config=True) def run_postprocess(self, stream, mimetype, filepath, resources): remove_folder = filepath.parent.joinpath(self.files_folder) if remove_folder.exists() and remove_folder.is_dir(): - self.logger.info( - 'removing folder: {0}'.format(remove_folder)) + self.logger.info("removing folder: {0}".format(remove_folder)) shutil.rmtree(str(remove_folder)) return stream, filepath, resources @@ -69,6 +72,7 @@ def run_postprocess(self, stream, mimetype, filepath, resources): class WriteResourceFiles(IPyPostProcessor): """ write content contained in the resources dict to file (as bytes) """ + @property def allowed_mimetypes(self): return None @@ -84,7 +88,7 @@ def logger_name(self): resource_keys = List( Unicode(), ["outputs"], - help="the key names in the resources dict that contain files" + help="the key names in the resources dict that contain files", ).tag(config=True) # The files already have a relative path @@ -105,10 +109,11 @@ def run_postprocess(self, stream, mimetype, filepath, resources): if not hasattr(resources[key], "items"): self.handle_error( "the value of resources[{0}] is not a mapping".format(key), - TypeError) + TypeError, + ) self.logger.info( - 'writing files in resources[{0}] to: {1}'.format( - key, output_folder)) + "writing files in resources[{0}] to: {1}".format(key, output_folder) + ) for filename, content in resources[key].items(): outpath = output_folder.joinpath(filename) @@ -126,6 +131,7 @@ def run_postprocess(self, stream, mimetype, filepath, resources): class CopyResourcePaths(IPyPostProcessor): """ copy filepaths in the resources dict to another folder """ + @property def allowed_mimetypes(self): return None @@ -141,12 +147,11 @@ def logger_name(self): resource_keys = List( Unicode(), ["external_file_paths"], - help="the key names in the resources dict that contain filepaths" + help="the key names in the resources dict that contain filepaths", ).tag(config=True) files_folder = Unicode( - "_static", - help="the path (relative to the main file path) to copy to" + "_static", help="the path (relative to the main file path) to copy to" ).tag(config=True) def run_postprocess(self, stream, mimetype, filepath, resources): @@ -160,14 +165,16 @@ def run_postprocess(self, stream, mimetype, filepath, resources): continue if not isinstance(resources[key], (list, tuple, set)): self.handle_error( - "the value of resources[{0}] is not an iterable".format( - key), TypeError) + "the value of resources[{0}] is not an iterable".format(key), + TypeError, + ) self.logger.info( - 'copying files in resources[{0}] to: {1}'.format( - key, output_folder)) + "copying files in resources[{0}] to: {1}".format(key, output_folder) + ) for resfilepath in resources[key]: - shutil.copyfile(resfilepath, - str(output_folder.joinpath( - os.path.basename(resfilepath)))) + shutil.copyfile( + resfilepath, + str(output_folder.joinpath(os.path.basename(resfilepath))), + ) return stream, filepath, resources diff --git a/ipypublish/postprocessors/pdfexport.py b/ipypublish/postprocessors/pdfexport.py index 175db16..26c16b8 100755 --- a/ipypublish/postprocessors/pdfexport.py +++ b/ipypublish/postprocessors/pdfexport.py @@ -17,9 +17,10 @@ class PDFExport(IPyPostProcessor): """ a post processor to convert tex to pdf using latexmk """ + @property def allowed_mimetypes(self): - return ("text/latex") + return "text/latex" @property def requires_path(self): @@ -31,23 +32,19 @@ def logger_name(self): files_folder = Unicode( "_static", - help="the path (relative to the main file path) " - "containing 
external files" + help="the path (relative to the main file path) " "containing external files", ).tag(config=True) convert_in_temp = Bool( False, - help="run conversion in a temporary directory, " - "and copy back only PDF file" + help="run conversion in a temporary directory, " "and copy back only PDF file", ).tag(config=True) - debug_mode = Bool( - False, - help="run in debug mode").tag(config=True) + debug_mode = Bool(False, help="run in debug mode").tag(config=True) open_in_browser = Bool( - False, - help="launch a html page containing a pdf browser").tag(config=True) + False, help="launch a html page containing a pdf browser" + ).tag(config=True) def run_postprocess(self, stream, mimetype, filepath, resources): """ should not be called directly @@ -65,7 +62,7 @@ def run_postprocess(self, stream, mimetype, filepath, resources): filepath: None or pathlib.Path """ - self.logger.info('running pdf conversion') + self.logger.info("running pdf conversion") self._export_pdf(filepath) return stream, filepath, resources @@ -73,8 +70,8 @@ def _export_pdf(self, texpath): if not texpath.exists(): self.handle_error( - 'the target file path does not exist: {}'.format( - texpath), IOError) + "the target file path does not exist: {}".format(texpath), IOError + ) texname = os.path.splitext(texpath.name)[0] # NOTE outdir was originally passed, but would this ever be different @@ -84,43 +81,44 @@ def _export_pdf(self, texpath): if external_files.exists() and not external_files.is_dir(): self.handle_error( - 'the external folder path is not a directory: {}'.format( - external_files), IOError) + "the external folder path is not a directory: {}".format( + external_files + ), + IOError, + ) self.check_exe_exists( - 'latexmk', - 'requires the latexmk executable to run. ' - 'See http://mg.readthedocs.io/latexmk.html#installation', + "latexmk", + "requires the latexmk executable to run. 
" + "See http://mg.readthedocs.io/latexmk.html#installation", ) if self.convert_in_temp: out_folder = tempfile.mkdtemp() try: - exitcode = self._run_latexmk( - texpath, out_folder, external_files) + exitcode = self._run_latexmk(texpath, out_folder, external_files) if exitcode == 0: shutil.copyfile( - os.path.join(out_folder, texname + '.pdf'), - str(texpath.parent.joinpath(texname + '.pdf'))) + os.path.join(out_folder, texname + ".pdf"), + str(texpath.parent.joinpath(texname + ".pdf")), + ) finally: shutil.rmtree(out_folder) else: - exitcode = self._run_latexmk( - texpath, str(texpath.parent), external_files) + exitcode = self._run_latexmk(texpath, str(texpath.parent), external_files) if exitcode == 0: - self.logger.info('pdf conversion complete') + self.logger.info("pdf conversion complete") - view_pdf = VIEW_PDF.format( - pdf_name=texname.replace(' ', '%20') + '.pdf') - view_pdf_path = texpath.parent.joinpath(texname + '.view_pdf.html') - with view_pdf_path.open('w', encoding='utf-8') as fobj: + view_pdf = VIEW_PDF.format(pdf_name=texname.replace(" ", "%20") + ".pdf") + view_pdf_path = texpath.parent.joinpath(texname + ".view_pdf.html") + with view_pdf_path.open("w", encoding="utf-8") as fobj: fobj.write(six.u(view_pdf)) else: self.handle_error( - 'pdf conversion failed: ' - 'Try running with pdf-debug flag', - RuntimeError) + "pdf conversion failed: " "Try running with pdf-debug flag", + RuntimeError, + ) if self.open_in_browser: # 2 opens the url in a new tab @@ -134,33 +132,33 @@ def _run_latexmk(self, texpath, out_folder, external_files): # make sure tex file in right place outpath = os.path.join(out_folder, texpath.name) if os.path.dirname(str(texpath)) != str(out_folder): - self.logger.debug('copying tex file to: {}'.format( - os.path.join(str(out_folder), texpath.name))) - shutil.copyfile(str(texpath), os.path.join( - str(out_folder), texpath.name)) + self.logger.debug( + "copying tex file to: {}".format( + os.path.join(str(out_folder), texpath.name) + ) + ) + shutil.copyfile(str(texpath), os.path.join(str(out_folder), texpath.name)) # make sure the external files folder is in right place if external_files.exists(): - self.logger.debug('external files folder set') + self.logger.debug("external files folder set") outfilespath = os.path.join(out_folder, str(external_files.name)) if str(external_files) != str(outfilespath): - self.logger.debug( - 'copying external files to: {}'.format(outfilespath)) + self.logger.debug("copying external files to: {}".format(outfilespath)) if os.path.exists(outfilespath): shutil.rmtree(outfilespath) shutil.copytree(str(external_files), str(outfilespath)) # run latexmk in correct folder with change_dir(out_folder): - latexmk = ['latexmk', '-xelatex', '-bibtex', '-pdf'] + latexmk = ["latexmk", "-xelatex", "-bibtex", "-pdf"] latexmk += [] if self.debug_mode else ["--interaction=batchmode"] latexmk += [outpath] - self.logger.info('running: ' + ' '.join(latexmk)) + self.logger.info("running: " + " ".join(latexmk)) def log_latexmk_output(pipe): - for line in iter(pipe.readline, b''): - self.logger.info('latexmk: {}'.format( - line.decode("utf-8").strip())) + for line in iter(pipe.readline, b""): + self.logger.info("latexmk: {}".format(line.decode("utf-8").strip())) process = Popen(latexmk, stdout=PIPE, stderr=STDOUT) with process.stdout: diff --git a/ipypublish/postprocessors/reveal_serve.py b/ipypublish/postprocessors/reveal_serve.py index d19beea..f998156 100644 --- a/ipypublish/postprocessors/reveal_serve.py +++ 
b/ipypublish/postprocessors/reveal_serve.py @@ -23,12 +23,18 @@ class ProxyHandler(web.RequestHandler): @gen.coroutine def get(self, prefix, url): """proxy a request to a CDN""" - proxy_url = '/'.join([self.settings['cdn'], url]) - client = self.settings['client'] + proxy_url = "/".join([self.settings["cdn"], url]) + client = self.settings["client"] client.fetch(proxy_url) response = yield client.fetch(proxy_url) - for header in ['Content-Type', 'Cache-Control', 'Date', 'Last-Modified', 'Expires']: + for header in [ + "Content-Type", + "Cache-Control", + "Date", + "Last-Modified", + "Expires", + ]: if header in response.headers: self.set_header(header, response.headers[header]) self.finish(response.body) @@ -42,7 +48,7 @@ class RevealServer(IPyPostProcessor): @property def allowed_mimetypes(self): - return ('text/html') + return "text/html" @property def requires_path(self): @@ -50,51 +56,58 @@ def requires_path(self): @property def logger_name(self): - return 'reveal-server' + return "reveal-server" - open_in_browser = Bool(True, help='Should the browser be opened automatically?').tag(config=True) + open_in_browser = Bool( + True, help="Should the browser be opened automatically?" + ).tag(config=True) - reveal_cdn = Unicode('https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.1.0', - help="""URL for reveal.js CDN.""").tag(config=True) + reveal_cdn = Unicode( + "https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.1.0", + help="""URL for reveal.js CDN.""", + ).tag(config=True) - reveal_prefix = Unicode('reveal.js', help='URL prefix for reveal.js').tag(config=True) + reveal_prefix = Unicode("reveal.js", help="URL prefix for reveal.js").tag( + config=True + ) - ip = Unicode('127.0.0.1', help='The IP address to listen on.').tag(config=True) + ip = Unicode("127.0.0.1", help="The IP address to listen on.").tag(config=True) - port = Int(8000, help='port for the server to listen on.').tag(config=True) + port = Int(8000, help="port for the server to listen on.").tag(config=True) def run_postprocess(self, stream, mimetype, filepath, resources): """Serve the build directory with a webserver.""" if not filepath.exists(): - self.handle_error('the target file path does not exist: {}'.format(filepath), IOError) + self.handle_error( + "the target file path does not exist: {}".format(filepath), IOError + ) # TODO rewrite this as pathlib dirname, filename = os.path.split(str(filepath)) - handlers = [(r'/(.+)', web.StaticFileHandler, { - 'path': dirname - }), (r'/', web.RedirectHandler, { - 'url': '/%s' % filename - })] + handlers = [ + (r"/(.+)", web.StaticFileHandler, {"path": dirname}), + (r"/", web.RedirectHandler, {"url": "/%s" % filename}), + ] - if '://' in self.reveal_prefix or self.reveal_prefix.startswith('//'): + if "://" in self.reveal_prefix or self.reveal_prefix.startswith("//"): # reveal specifically from CDN, nothing to do pass elif os.path.isdir(os.path.join(dirname, self.reveal_prefix)): # reveal prefix exists - self.logger.info('Serving local %s', self.reveal_prefix) - self.logger.info('Serving local %s', self.reveal_prefix) + self.logger.info("Serving local %s", self.reveal_prefix) + self.logger.info("Serving local %s", self.reveal_prefix) else: - self.logger.info('Redirecting %s requests to %s', self.reveal_prefix, self.reveal_cdn) - self.logger.info('Redirecting %s requests to %s', self.reveal_prefix, self.reveal_cdn) - handlers.insert(0, (r'/(%s)/(.*)' % self.reveal_prefix, ProxyHandler)) + self.logger.info( + "Redirecting %s requests to %s", self.reveal_prefix, self.reveal_cdn + ) + 
self.logger.info(
+                "Redirecting %s requests to %s", self.reveal_prefix, self.reveal_cdn
+            )
+            handlers.insert(0, (r"/(%s)/(.*)" % self.reveal_prefix, ProxyHandler))

-        app = web.Application(
-            handlers,
-            cdn=self.reveal_cdn,
-            client=AsyncHTTPClient(),
-        )
+        app = web.Application(handlers, cdn=self.reveal_cdn, client=AsyncHTTPClient())

         # hook up tornado logging to our self.logger
         log.app_log = self.logger
@@ -105,21 +118,24 @@ def run_postprocess(self, stream, mimetype, filepath, resources):
         port_attempts = list(range(10))
         for port_attempt in port_attempts:
             try:
-                url = 'http://%s:%i/%s' % (self.ip, self.port, filename)
-                self.logger.info('Attempting to serve at %s' % url)
+                url = "http://%s:%i/%s" % (self.ip, self.port, filename)
+                self.logger.info("Attempting to serve at %s" % url)
                 http_server.listen(self.port, address=self.ip)
                 break
             except IOError:
                 self.port += 1

         if port_attempt == port_attempts[-1]:
-            self.handle_error('no port available to launch slides on, ' 'try closing some slideshows', IOError)
+            self.handle_error(
+                "no port available to launch slides on, " "try closing some slideshows",
+                IOError,
+            )

-        self.logger.info('Serving your slides at %s' % url)
-        self.logger.info('Use Control-C to stop this server')
+        self.logger.info("Serving your slides at %s" % url)
+        self.logger.info("Use Control-C to stop this server")

         # don't let people press ctrl-z, which leaves port open
         def handler(signum, frame):
-            self.logger.info('Control-Z pressed, but ignored, use Control-C!')
+            self.logger.info("Control-Z pressed, but ignored, use Control-C!")

         signal.signal(signal.SIGTSTP, handler)

@@ -132,6 +148,6 @@ def handler(signum, frame):
         except KeyboardInterrupt:
             # doesn't look like line below is necessary
             ioloop.IOLoop.instance().stop()
-            self.logger.info('\nInterrupted')
+            self.logger.info("\nInterrupted")

         return stream, filepath, resources
diff --git a/ipypublish/postprocessors/sphinx.py b/ipypublish/postprocessors/sphinx.py
index d948833..71d3b50 100644
--- a/ipypublish/postprocessors/sphinx.py
+++ b/ipypublish/postprocessors/sphinx.py
@@ -16,9 +16,11 @@
 # NOTE Interesting note about adding a directive to actually run python code
 # https://stackoverflow.com/questions/7250659/how-to-use-python-to-programmatically-generate-part-of-sphinx-documentation

+
 class RunSphinx(IPyPostProcessor):
     """ run sphinx to create an html output
     """
+
     @property
     def allowed_mimetypes(self):
         return ("text/restructuredtext",)
@@ -32,43 +34,41 @@ def logger_name(self):
         return "run-sphinx"

     open_in_browser = Bool(
-        True,
-        help="launch a html page containing a pdf browser").tag(config=True)
+        True, help="launch a html page containing a pdf browser"
+    ).tag(config=True)

     numbered = Bool(
-        True,
-        help="set :numbered: in toc, which numbers sections, etc"
+        True, help="set :numbered: in toc, which numbers sections, etc"
     ).tag(config=True)

-    show_prompts = Bool(
-        True,
-        help="whether to include cell prompts").tag(config=True)
+    show_prompts = Bool(True, help="whether to include cell prompts").tag(config=True)

-    prompt_style = Unicode(
-        '[{count}]:',
-        help="the style of cell prompts").tag(config=True)
+    prompt_style = Unicode("[{count}]:", help="the style of cell prompts").tag(
+        config=True
+    )

-    @validate('prompt_style')
+    @validate("prompt_style")
     def _valid_prompt_style(self, proposal):
         try:
             proposal.format(count=1)
         except TypeError:
-            raise TraitError("prompt_style should be formatable by "
-                             "`prompt_style.format(count=1)`")
-        return proposal['value']
+            raise TraitError(
+                "prompt_style should be formattable by " 
"`prompt_style.format(count=1)`" + ) + return proposal["value"] conf_kwargs = Dict( - help=("additional key-word arguments to be included in the conf.py " - "as = ")).tag(config=True) + help=( + "additional key-word arguments to be included in the conf.py " + "as = " + ) + ).tag(config=True) override_defaults = Bool( - True, - help="if True, conf_kwargs override default values").tag(config=True) + True, help="if True, conf_kwargs override default values" + ).tag(config=True) - nitpick = Bool( - False, - help="nit-picky mode, warn about all missing references" - ) + nitpick = Bool(False, help="nit-picky mode, warn about all missing references") def run_postprocess(self, stream, mimetype, filepath, resources): @@ -122,36 +122,38 @@ def run_postprocess(self, stream, mimetype, filepath, resources): epilog.append("| " + inst) epilog.append("") - epilog.append('Created by IPyPublish (version {})'.format(__version__)) + epilog.append("Created by IPyPublish (version {})".format(__version__)) toc = resources.get("ipub", {}).get("toc", {}) if hasattr(toc, "get") and "depth" in toc: toc_depth = toc["depth"] - index_str = make_index(toc_files, - toc_depth=toc_depth, header=title, - toc_numbered=self.numbered, - prolog="\n".join(prolog), - epilog="\n".join(epilog)) + index_str = make_index( + toc_files, + toc_depth=toc_depth, + header=title, + toc_numbered=self.numbered, + prolog="\n".join(prolog), + epilog="\n".join(epilog), + ) index_path = filepath.parent.joinpath("index.rst") with index_path.open("w", encoding="utf8") as f: f.write(u(index_str)) # clear any existing build - build_dir = filepath.parent.joinpath('build/html') + build_dir = filepath.parent.joinpath("build/html") if build_dir.exists(): # >> rm -r build/html shutil.rmtree(str(build_dir)) build_dir.mkdir(parents=True) # run sphinx - exec_path = find_executable('sphinx-build') + exec_path = find_executable("sphinx-build") args = [exec_path, "-b", "html"] if self.nitpick: args.append("-n") - args.extend([str(filepath.parent.absolute()), - str(build_dir.absolute())]) + args.extend([str(filepath.parent.absolute()), str(build_dir.absolute())]) self.logger.info("running: " + " ".join(args)) @@ -160,9 +162,8 @@ def run_postprocess(self, stream, mimetype, filepath, resources): # self.logger, "sphinx") def log_process_output(pipe): - for line in iter(pipe.readline, b''): - self.logger.info('{}'.format( - line.decode("utf-8").strip())) + for line in iter(pipe.readline, b""): + self.logger.info("{}".format(line.decode("utf-8").strip())) process = Popen(args, stdout=PIPE, stderr=STDOUT) with process.stdout: @@ -170,19 +171,18 @@ def log_process_output(pipe): exitcode = process.wait() # 0 means success if exitcode: - self.logger.warn( - "sphinx-build exited with code: {}".format(exitcode)) + self.logger.warning("sphinx-build exited with code: {}".format(exitcode)) if self.open_in_browser and not exitcode: # get entry path - entry_path = filepath.parent.joinpath('build/html') + entry_path = filepath.parent.joinpath("build/html") entry_path = entry_path.joinpath( - os.path.splitext(filepath.name)[0] + '.html') + os.path.splitext(filepath.name)[0] + ".html" + ) if entry_path.exists(): # 2 opens the url in a new tab webbrowser.open(entry_path.as_uri(), new=2) else: - self.handle_error( - "can't find {0} to open".format(entry_path), IOError) + self.handle_error("can't find {0} to open".format(entry_path), IOError) return stream, filepath, resources diff --git a/ipypublish/postprocessors/stream_modify.py b/ipypublish/postprocessors/stream_modify.py index 
2922769..f0bfe98 100644 --- a/ipypublish/postprocessors/stream_modify.py +++ b/ipypublish/postprocessors/stream_modify.py @@ -8,10 +8,17 @@ class RemoveBlankLines(IPyPostProcessor): """ remove multiple lines of blank space """ + @property def allowed_mimetypes(self): - return ("text/latex", "text/restructuredtext", "text/html", - "text/x-python", "application/json", "text/markdown") + return ( + "text/latex", + "text/restructuredtext", + "text/html", + "text/x-python", + "application/json", + "text/markdown", + ) @property def requires_path(self): @@ -22,16 +29,22 @@ def logger_name(self): return "remove-blank-lines" def run_postprocess(self, stream, mimetype, filepath, resources): - stream = re.sub(r'\n\s*\n', '\n\n', stream) + stream = re.sub(r"\n\s*\n", "\n\n", stream) return stream, filepath, resources class RemoveTrailingSpace(IPyPostProcessor): """ remove trailing whitespace on each line """ + @property def allowed_mimetypes(self): - return ("text/latex", "text/restructuredtext", - "text/x-python", "application/json", "text/markdown") + return ( + "text/latex", + "text/restructuredtext", + "text/x-python", + "application/json", + "text/markdown", + ) @property def requires_path(self): @@ -49,6 +62,7 @@ def run_postprocess(self, stream, mimetype, filepath, resources): class FilterOutputFiles(IPyPostProcessor): """ filter internal files in resources['outputs'], by those that are referenced in the stream """ + @property def allowed_mimetypes(self): return None @@ -63,19 +77,20 @@ def logger_name(self): def run_postprocess(self, stream, mimetype, filepath, resources): - if 'outputs' in resources: - for path in list(resources['outputs'].keys()): + if "outputs" in resources: + for path in list(resources["outputs"].keys()): if path not in stream: - resources['outputs'].pop(path) + resources["outputs"].pop(path) return stream, filepath, resources class FixSlideReferences(IPyPostProcessor): """ make sure references refer to correct slides """ + @property def allowed_mimetypes(self): - return ("text/html") + return "text/html" @property def requires_path(self): @@ -86,8 +101,10 @@ def logger_name(self): return "fix-slide-refs" def run_postprocess(self, stream, mimetype, filepath, resources): - if resources and 'refslide' in resources: - for k, (col, row) in resources['refslide'].items(): - stream = stream.replace('{{id_home_prefix}}{0}'.format( - k), '#/{0}/{1}{2}'.format(col, row, k)) + if resources and "refslide" in resources: + for k, (col, row) in resources["refslide"].items(): + stream = stream.replace( + "{{id_home_prefix}}{0}".format(k), + "#/{0}/{1}{2}".format(col, row, k), + ) return stream, filepath, resources diff --git a/ipypublish/postprocessors/to_stream.py b/ipypublish/postprocessors/to_stream.py index 5838520..098408d 100644 --- a/ipypublish/postprocessors/to_stream.py +++ b/ipypublish/postprocessors/to_stream.py @@ -7,10 +7,17 @@ class WriteStream(IPyPostProcessor): """ write the stream to the terminal """ + @property def allowed_mimetypes(self): - return ("text/latex", "text/restructuredtext", "text/html", - "text/x-python", "application/json", "text/markdown") + return ( + "text/latex", + "text/restructuredtext", + "text/html", + "text/x-python", + "application/json", + "text/markdown", + ) @property def requires_path(self): @@ -21,22 +28,21 @@ def logger_name(self): return "write-text-file" encoding = Unicode( - default_value="utf8", - help="the encoding of the output file" + default_value="utf8", help="the encoding of the output file" ).tag(config=True) pipe = Enum( 
["stdout", "stderr", "stdin"], default_value="stdout", - help="where to write the output to" + help="where to write the output to", ).tag(config=True) def run_postprocess(self, stream, mimetype, filepath, resources): - self.logger.info('writing stream to {}'.format(self.pipe)) - io_type = {"stdout": sys.stdout, - "stdin": sys.stdin, - "stderr": sys.stderr}.get(self.pipe) + self.logger.info("writing stream to {}".format(self.pipe)) + io_type = {"stdout": sys.stdout, "stdin": sys.stdin, "stderr": sys.stderr}.get( + self.pipe + ) io_type.write(stream) return stream, filepath, resources diff --git a/ipypublish/preprocessors/crop_cells.py b/ipypublish/preprocessors/crop_cells.py index e848dfc..396b8a2 100644 --- a/ipypublish/preprocessors/crop_cells.py +++ b/ipypublish/preprocessors/crop_cells.py @@ -7,15 +7,18 @@ class CropCells(Preprocessor): """A preprocessor to crop the notebook cells from to """ - start = traits.Integer( - 0, help="first cell of notebook to be converted").tag(config=True) - end = traits.Integer(-1, - help="last cell of notebook to be converted" - ).tag(config=True) + start = traits.Integer(0, help="first cell of notebook to be converted").tag( + config=True + ) + end = traits.Integer(-1, help="last cell of notebook to be converted").tag( + config=True + ) def preprocess(self, nb, resources): logging.info( - 'preprocessing notebook: cropping cells {0} to {1}'.format( - self.start, self.end)) - nb.cells = nb.cells[self.start:self.end] + "preprocessing notebook: cropping cells {0} to {1}".format( + self.start, self.end + ) + ) + nb.cells = nb.cells[self.start : self.end] return nb, resources diff --git a/ipypublish/preprocessors/latex_doc_captions.py b/ipypublish/preprocessors/latex_doc_captions.py index 8e04355..be0e08e 100644 --- a/ipypublish/preprocessors/latex_doc_captions.py +++ b/ipypublish/preprocessors/latex_doc_captions.py @@ -18,46 +18,49 @@ class LatexCaptions(Preprocessor): add_prefix = traits.Bool( False, - help=("add float type/number prefix to caption " - "(from caption_prefix tag)")).tag(config=True) + help=("add float type/number prefix to caption " "(from caption_prefix tag)"), + ).tag(config=True) def preprocess(self, nb, resources): - logger.info('extracting caption cells') + logger.info("extracting caption cells") # extract captions final_cells = [] captions = {} for cell in nb.cells: - if hasattr(cell.metadata, 'ipub'): - - if hasattr(cell.metadata.ipub.get('equation', False), 'get'): - if hasattr(cell.metadata.ipub.equation.get( - 'environment', False), 'startswith'): - if cell.metadata.ipub.equation.environment.startswith('breqn'): # noqa: E501 + if hasattr(cell.metadata, "ipub"): + + if hasattr(cell.metadata.ipub.get("equation", False), "get"): + if hasattr( + cell.metadata.ipub.equation.get("environment", False), + "startswith", + ): + if cell.metadata.ipub.equation.environment.startswith( + "breqn" + ): # noqa: E501 if "ipub" not in nb.metadata: nb.metadata["ipub"] = NotebookNode( - {'enable_breqn': True}) + {"enable_breqn": True} + ) else: - nb.metadata.ipub['enable_breqn'] = True + nb.metadata.ipub["enable_breqn"] = True - if hasattr(cell.metadata.ipub, 'caption'): + if hasattr(cell.metadata.ipub, "caption"): - if cell.cell_type == 'markdown': - capt = cell.source.split(r'\n')[0] + if cell.cell_type == "markdown": + capt = cell.source.split(r"\n")[0] captions[cell.metadata.ipub.caption] = capt continue - elif cell.cell_type == 'code': + elif cell.cell_type == "code": if not cell.outputs: pass - elif "text/latex" in cell.outputs[0].get('data', {}): 
- capt = cell.outputs[0].data["text/latex"].split( - r'\n')[0] + elif "text/latex" in cell.outputs[0].get("data", {}): + capt = cell.outputs[0].data["text/latex"].split(r"\n")[0] captions[cell.metadata.ipub.caption] = capt continue - elif "text/plain" in cell.outputs[0].get('data', {}): - capt = cell.outputs[0].data["text/plain"].split( - r'\n')[0] + elif "text/plain" in cell.outputs[0].get("data", {}): + capt = cell.outputs[0].data["text/plain"].split(r"\n")[0] captions[cell.metadata.ipub.caption] = capt continue @@ -66,22 +69,27 @@ def preprocess(self, nb, resources): # replace captions for cell in nb.cells: - if hasattr(cell.metadata, 'ipub'): + if hasattr(cell.metadata, "ipub"): for key in cell.metadata.ipub: - if hasattr(cell.metadata.ipub[key], 'label'): - if cell.metadata.ipub[key]['label'] in captions: - logger.debug('replacing caption for: {}'.format( - cell.metadata.ipub[key]['label'])) - cell.metadata.ipub[key]['caption'] = captions[cell.metadata.ipub[key]['label']] # noqa: E501 + if hasattr(cell.metadata.ipub[key], "label"): + if cell.metadata.ipub[key]["label"] in captions: + logger.debug( + "replacing caption for: {}".format( + cell.metadata.ipub[key]["label"] + ) + ) + cell.metadata.ipub[key]["caption"] = captions[ + cell.metadata.ipub[key]["label"] + ] # noqa: E501 # add float type/number prefix to caption, if required if self.add_prefix: - if hasattr(cell.metadata.ipub[key], 'caption'): - if hasattr(cell.metadata.ipub[key], - 'caption_prefix'): + if hasattr(cell.metadata.ipub[key], "caption"): + if hasattr(cell.metadata.ipub[key], "caption_prefix"): newcaption = ( - cell.metadata.ipub[key].caption_prefix + - cell.metadata.ipub[key].caption) + cell.metadata.ipub[key].caption_prefix + + cell.metadata.ipub[key].caption + ) cell.metadata.ipub[key].caption = newcaption return nb, resources diff --git a/ipypublish/preprocessors/latex_doc_defaults.py b/ipypublish/preprocessors/latex_doc_defaults.py index f0171db..f54c435 100644 --- a/ipypublish/preprocessors/latex_doc_defaults.py +++ b/ipypublish/preprocessors/latex_doc_defaults.py @@ -5,7 +5,7 @@ from nbformat.notebooknode import NotebookNode -def flatten(d, key_as_tuple=True, sep='.'): +def flatten(d, key_as_tuple=True, sep="."): """ get nested dict as {key:val,...}, where key is tuple/string of all nested keys @@ -35,11 +35,12 @@ def flatten(d, key_as_tuple=True, sep='.'): def expand(key, value): if isinstance(value, dict): if key_as_tuple: - return [(key + k, v) - for k, v in flatten(value, key_as_tuple).items()] + return [(key + k, v) for k, v in flatten(value, key_as_tuple).items()] else: - return [(str(key) + sep + k, v) - for k, v in flatten(value, key_as_tuple).items()] + return [ + (str(key) + sep + k, v) + for k, v in flatten(value, key_as_tuple).items() + ] else: return [(key, value)] @@ -58,18 +59,18 @@ class MetaDefaults(Preprocessor): """ nb_defaults = traits.Dict( - default_value={}, - help='dict of notebook level defaults').tag(config=True) + default_value={}, help="dict of notebook level defaults" + ).tag(config=True) cell_defaults = traits.Dict( - default_value={}, - help='dict of cell level defaults').tag(config=True) + default_value={}, help="dict of cell level defaults" + ).tag(config=True) overwrite = traits.Bool( - False, - help="whether existing values should be overwritten").tag(config=True) + False, help="whether existing values should be overwritten" + ).tag(config=True) def preprocess(self, nb, resources): - logging.info('adding ipub defaults to notebook') + logging.info("adding ipub defaults to 
notebook") for keys, val in flatten(self.nb_defaults).items(): dct = nb.metadata @@ -94,7 +95,7 @@ def preprocess(self, nb, resources): dct[key] = NotebookNode({}) elif dct[key] is True: dct[key] = NotebookNode({}) - elif not hasattr(dct[key], 'items'): + elif not hasattr(dct[key], "items"): leaf_not_dict = True break dct = dct[key] diff --git a/ipypublish/preprocessors/latex_doc_html.py b/ipypublish/preprocessors/latex_doc_html.py index 4e543b3..c8fb0bc 100644 --- a/ipypublish/preprocessors/latex_doc_html.py +++ b/ipypublish/preprocessors/latex_doc_html.py @@ -17,21 +17,20 @@ class LatexDocHTML(Preprocessor): """ - metapath = traits.Unicode( - '', help="the path to the meta data").tag(config=True) - filesfolder = traits.Unicode( - '', help="the folder to point towards").tag(config=True) + metapath = traits.Unicode("", help="the path to the meta data").tag(config=True) + filesfolder = traits.Unicode("", help="the folder to point towards").tag( + config=True + ) src_name = traits.Unicode( - 'src', - help=("for embedding, if reveal js slides use data-src " - "(for lazy loading)")).tag( - config=True) + "src", + help=("for embedding, if reveal js slides use data-src " "(for lazy loading)"), + ).tag(config=True) - @traits.validate('src_name') + @traits.validate("src_name") def _valid_value(self, proposal): - if proposal['value'] not in ['src', 'data-src']: - raise traits.TraitError('src_name must be src or src or data-src') - return proposal['value'] + if proposal["value"] not in ["src", "data-src"]: + raise traits.TraitError("src_name must be src or src or data-src") + return proposal["value"] def __init__(self, *args, **kwargs): super(LatexDocHTML, self).__init__(*args, **kwargs) @@ -46,93 +45,109 @@ def resolve_path(self, fpath, filepath): def embed_html(self, cell, path): """ a new cell, based on embedded html file """ - logging.info('embedding html in notebook from: {}'.format(path)) + logging.info("embedding html in notebook from: {}".format(path)) - height = int(cell.metadata.ipub.embed_html.get('height', 0.5) * 100) - width = int(cell.metadata.ipub.embed_html.get('width', 0.5) * 100) + height = int(cell.metadata.ipub.embed_html.get("height", 0.5) * 100) + width = int(cell.metadata.ipub.embed_html.get("width", 0.5) * 100) embed_code = ( '').format( - src=self.src_name, path=path, height=height, width=width) + "" + ).format(src=self.src_name, path=path, height=height, width=width) # add to the exising output or create a new one if cell.outputs: cell.outputs[0]["data"]["text/html"] = embed_code else: - cell.outputs.append(NotebookNode( - { - "data": { - "text/html": embed_code - }, - "execution_count": 0, - "metadata": {}, - "output_type": "execute_result" - })) + cell.outputs.append( + NotebookNode( + { + "data": {"text/html": embed_code}, + "execution_count": 0, + "metadata": {}, + "output_type": "execute_result", + } + ) + ) return cell def preprocess(self, nb, resources): - logging.info('processing notebook for html output' + - ' in ipub metadata to: {}'.format(self.metapath)) + logging.info( + "processing notebook for html output" + + " in ipub metadata to: {}".format(self.metapath) + ) final_cells = [] - float_count = dict([('figure', 0), ('table', 0), - ('code', 0), ('text', 0), ('error', 0)]) + float_count = dict( + [("figure", 0), ("table", 0), ("code", 0), ("text", 0), ("error", 0)] + ) for i, cell in enumerate(nb.cells): - if hasattr(cell.metadata, 'ipub'): - if hasattr(cell.metadata.ipub, 'embed_html'): - if hasattr(cell.metadata.ipub.embed_html, 'filepath'): + if 
hasattr(cell.metadata, "ipub"): + if hasattr(cell.metadata.ipub, "embed_html"): + if hasattr(cell.metadata.ipub.embed_html, "filepath"): paths = [cell.metadata.ipub.embed_html.filepath] - if hasattr(cell.metadata.ipub.embed_html, - 'other_files'): + if hasattr(cell.metadata.ipub.embed_html, "other_files"): if not isinstance( - cell.metadata.ipub.embed_html.other_files, - list): - raise TypeError("{} should be a list".format( - cell.metadata.ipub.embed_html.other_files)) + cell.metadata.ipub.embed_html.other_files, list + ): + raise TypeError( + "{} should be a list".format( + cell.metadata.ipub.embed_html.other_files + ) + ) paths += cell.metadata.ipub.embed_html.other_files for j, path in enumerate(paths): fpath = self.resolve_path(path, self.metapath) if not os.path.exists(fpath): logging.warning( "file in embed html metadata doesn't exist" - ": {}".format(fpath)) + ": {}".format(fpath) + ) else: resources.setdefault("external_file_paths", []) - resources['external_file_paths'].append(fpath) + resources["external_file_paths"].append(fpath) if j == 0: self.embed_html( - cell, os.path.join( - self.filesfolder, - os.path.basename(fpath))) - - elif hasattr(cell.metadata.ipub.embed_html, 'url'): - self.embed_html( - cell, cell.metadata.ipub.embed_html.url) + cell, + os.path.join( + self.filesfolder, os.path.basename(fpath) + ), + ) + + elif hasattr(cell.metadata.ipub.embed_html, "url"): + self.embed_html(cell, cell.metadata.ipub.embed_html.url) else: logging.warning( - 'cell {} has no filepath or url key in its ' - 'metadata.embed_html'.format(i)) + "cell {} has no filepath or url key in its " + "metadata.embed_html".format(i) + ) for floattype, floatabbr in [ - ('figure', 'fig.'), ('table', 'tbl.'), ('code', 'code'), - ('text', 'text'), ('error', 'error')]: + ("figure", "fig."), + ("table", "tbl."), + ("code", "code"), + ("text", "text"), + ("error", "error"), + ]: if floattype in cell.metadata.ipub: - if floattype != 'code' and not cell.get("outputs", []): + if floattype != "code" and not cell.get("outputs", []): continue float_count[floattype] += 1 if not isinstance(cell.metadata.ipub[floattype], dict): continue - cell.metadata.ipub[floattype]['caption_prefix'] = '{0} {1}: '.format( # noqa: E501 - floattype.capitalize(), float_count[floattype]) - if 'label' in cell.metadata.ipub[floattype]: - label = '{0} {1}'.format( - floatabbr, float_count[floattype]) - resources.setdefault('refmap', {})[ - cell.metadata.ipub[floattype]['label']] = label + cell.metadata.ipub[floattype][ + "caption_prefix" + ] = "{0} {1}: ".format( # noqa: E501 + floattype.capitalize(), float_count[floattype] + ) + if "label" in cell.metadata.ipub[floattype]: + label = "{0} {1}".format(floatabbr, float_count[floattype]) + resources.setdefault("refmap", {})[ + cell.metadata.ipub[floattype]["label"] + ] = label final_cells.append(cell) nb.cells = final_cells diff --git a/ipypublish/preprocessors/latex_doc_links.py b/ipypublish/preprocessors/latex_doc_links.py index f9d7d35..3be4edf 100644 --- a/ipypublish/preprocessors/latex_doc_links.py +++ b/ipypublish/preprocessors/latex_doc_links.py @@ -51,8 +51,7 @@ def resolve_path(fpath, filepath): return os.path.normpath(fpath) -def extract_file_links(source, parent_path, redirect_path, - replace_nonexistent=False): +def extract_file_links(source, parent_path, redirect_path, replace_nonexistent=False): """ extract local linked files Examples @@ -75,7 +74,7 @@ def extract_file_links(source, parent_path, redirect_path, """ # TODO is this robust enough - regex = 
re.compile('\\[([^\\]]*)\\]\\(([^\\)^\\#]*)([^\\)]*)\\)')
+    regex = re.compile("\\[([^\\]]*)\\]\\(([^\\)^\\#]*)([^\\)]*)\\)")
     new_source = source
     redirected_paths = []
     nonexistent_paths = []
@@ -91,11 +90,13 @@ def extract_file_links(source, parent_path, redirect_path,
                 nonexistent_paths.append(respath)
             if os.path.exists(respath) or replace_nonexistent:
                 redirected_paths.append(respath)
-                new_path = os.path.normpath(os.path.join(
-                    redirect_path, os.path.basename(path)))
+                new_path = os.path.normpath(
+                    os.path.join(redirect_path, os.path.basename(path))
+                )
                 new_source = new_source.replace(
                     "[{0}]({1}{2})".format(text, path, label),
-                    "[{0}]({1}{2})".format(text, new_path, label))
+                    "[{0}]({1}{2})".format(text, new_path, label),
+                )
 
     return new_source, redirected_paths, nonexistent_paths
 
@@ -120,19 +121,20 @@ class LatexDocLinks(Preprocessor):
 
     """
 
-    metapath = traits.Unicode(
-        '', help="the file path to the notebook").tag(config=True)
+    metapath = traits.Unicode("", help="the file path to the notebook").tag(config=True)
     filesfolder = traits.Unicode(
-        '', help=("the folder path to dump dump internal content to "
-                  "(e.g. images, etc)")).tag(config=True)
+        "",
+        help=("the folder path to dump internal content to " "(e.g. images, etc)"),
+    ).tag(config=True)
     redirect_external = traits.Bool(
-        True,
-        help="if True, redirect relatively linked paths to filesfolder"
+        True, help="if True, redirect relatively linked paths to filesfolder"
     ).tag(config=True)
     extract_attachments = traits.Bool(
         True,
-        help=("extract attachments stored in the notebook"
-              "(created by dragging and dropping files into markdown cells)")
+        help=(
+            "extract attachments stored in the notebook "
+            "(created by dragging and dropping files into markdown cells)"
+        ),
    ).tag(config=True)
     output_attachment_template = traits.Unicode(
         "{unique_key}_{cell_index}_{key}{extension}"
@@ -157,16 +159,17 @@ def preprocess_cell(self, cell, resources, cell_index):
 
         # extract local linked files
         source, rpaths, npaths = extract_file_links(
-            cell.source, self.metapath, self.filesfolder)
+            cell.source, self.metapath, self.filesfolder
+        )
         if self.redirect_external:
             cell.source = source
 
-        resources['external_file_paths'].extend(rpaths)
-        resources['unfound_file_paths'].extend(npaths)
+        resources["external_file_paths"].extend(rpaths)
+        resources["unfound_file_paths"].extend(npaths)
 
         # extract attachments
-        unique_key = resources.get('unique_key', 'attach')
-        if 'attachments' in cell and self.extract_attachments:
-            attachments = cell.pop('attachments')
+        unique_key = resources.get("unique_key", "attach")
+        if "attachments" in cell and self.extract_attachments:
+            attachments = cell.pop("attachments")
             for key, attachment in attachments.items():
 
                 # TODO this only works if there is a single MIME bundle
@@ -174,25 +177,29 @@ def preprocess_cell(self, cell, resources, cell_index):
                 ext = guess_extension_without_jpe(mime_type)
 
                 if ext is None:
-                    ext = '.' + mime_type.rsplit('/')[-1]
+                    ext = "." 
+ mime_type.rsplit("/")[-1] # replace the pointer to the attachment filepath = os.path.normpath( - os.path.join(self.filesfolder, - self.output_attachment_template.format( - unique_key=unique_key, - cell_index=cell_index, - key=os.path.splitext(key)[0], - extension=ext)) + os.path.join( + self.filesfolder, + self.output_attachment_template.format( + unique_key=unique_key, + cell_index=cell_index, + key=os.path.splitext(key)[0], + extension=ext, + ), + ) ) if "source" in cell: cell["source"] = cell["source"].replace( - 'attachment:{}'.format(key), filepath) + "attachment:{}".format(key), filepath + ) # code taken from nbconvert.ExtractOutputPreprocessor if ( not isinstance(data, string_types) - or mime_type == 'application/json' + or mime_type == "application/json" ): # Data is either JSON-like and was parsed into a Python # object according to the spec, or data is for sure @@ -203,17 +210,16 @@ def preprocess_cell(self, cell, resources, cell_index): data = json.dumps(data) # Binary files are base64-encoded, SVG is already XML - if mime_type in { - 'image/png', 'image/jpeg', 'application/pdf'}: + if mime_type in {"image/png", "image/jpeg", "application/pdf"}: # data is b64-encoded as text (str, unicode), # we want the original bytes data = a2b_base64(data) - elif sys.platform == 'win32': - data = data.replace('\n', '\r\n').encode("UTF-8") + elif sys.platform == "win32": + data = data.replace("\n", "\r\n").encode("UTF-8") else: data = data.encode("UTF-8") - if filepath in resources['outputs']: + if filepath in resources["outputs"]: raise ValueError( "Your outputs have filename metadata associated " "with them. Nbconvert saves these outputs to " @@ -225,7 +231,7 @@ def preprocess_cell(self, cell, resources, cell_index): "{}.".format(filepath, cell_index) ) # In the resources, make the figure available - resources['outputs'][filepath] = data + resources["outputs"][filepath] = data return cell, resources @@ -234,63 +240,67 @@ def preprocess(self, nb, resources): Preprocessing to apply on each notebook. 
""" - logger.info('resolving external file paths' + - ' in ipub metadata to: {}'.format(self.metapath)) + logger.info( + "resolving external file paths" + + " in ipub metadata to: {}".format(self.metapath) + ) resources.setdefault("external_file_paths", []) resources.setdefault("unfound_file_paths", []) - if 'ipub' in nb.metadata: + if "ipub" in nb.metadata: - if 'bibliography' in nb.metadata.ipub: + if "bibliography" in nb.metadata.ipub: bib = nb.metadata.ipub.bibliography bib = resolve_path(bib, self.metapath) if not os.path.exists(bib): - resources['unfound_file_paths'].append(bib) + resources["unfound_file_paths"].append(bib) else: - resources['external_file_paths'].append(bib) - resources['bibliopath'] = bib + resources["external_file_paths"].append(bib) + resources["bibliopath"] = bib if self.redirect_external: nb.metadata.ipub.bibliography = os.path.join( - self.filesfolder, os.path.basename(bib)) + self.filesfolder, os.path.basename(bib) + ) - if "filepath" in nb.metadata.ipub.get('bibglossary', {}): + if "filepath" in nb.metadata.ipub.get("bibglossary", {}): gloss = nb.metadata.ipub.bibglossary.filepath gloss = resolve_path(gloss, self.metapath) if not os.path.exists(gloss): - resources['unfound_file_paths'].append(gloss) + resources["unfound_file_paths"].append(gloss) else: - resources['external_file_paths'].append(gloss) - resources['bibglosspath'] = gloss + resources["external_file_paths"].append(gloss) + resources["bibglosspath"] = gloss if self.redirect_external: nb.metadata.ipub.bibglossary.filepath = os.path.join( - self.filesfolder, os.path.basename(gloss)) + self.filesfolder, os.path.basename(gloss) + ) - if 'logo' in nb.metadata.ipub.get('titlepage', {}): + if "logo" in nb.metadata.ipub.get("titlepage", {}): logo = nb.metadata.ipub.titlepage.logo logo = resolve_path(logo, self.metapath) if not os.path.exists(logo): - resources['unfound_file_paths'].append(logo) + resources["unfound_file_paths"].append(logo) else: - resources['external_file_paths'].append(logo) + resources["external_file_paths"].append(logo) if self.redirect_external: nb.metadata.ipub.titlepage.logo = os.path.join( - self.filesfolder, os.path.basename(logo)) + self.filesfolder, os.path.basename(logo) + ) for index, cell in enumerate(nb.cells): - nb.cells[index], resources = self.preprocess_cell( - cell, resources, index) + nb.cells[index], resources = self.preprocess_cell(cell, resources, index) # filter unique - resources['external_file_paths'] = list( - set(resources['external_file_paths'])) + resources["external_file_paths"] = list(set(resources["external_file_paths"])) upaths = set(resources.pop("unfound_file_paths")) if upaths: - logger.warning('referenced file(s) do not exist' - ': {}'.format(list(upaths))) + logger.warning( + "referenced file(s) do not exist" ": {}".format(list(upaths)) + ) return nb, resources diff --git a/ipypublish/preprocessors/latextags_to_html.py b/ipypublish/preprocessors/latextags_to_html.py index 98cd216..5a6e633 100644 --- a/ipypublish/preprocessors/latextags_to_html.py +++ b/ipypublish/preprocessors/latextags_to_html.py @@ -11,7 +11,7 @@ class DefaultFormatter(string.Formatter): - def __init__(self, default=''): + def __init__(self, default=""): self.default = default def get_value(self, key, args, kwds): @@ -31,9 +31,9 @@ def safe_str(obj): return str(obj) except UnicodeEncodeError: # python 2.7 - obj = re.sub(u"\u2013", "-", obj) # en dash + obj = re.sub(u"\u2013", "-", obj) # en dash obj = re.sub(u"\u2014", "--", obj) # em dash - return obj.encode('ascii', 
'ignore').decode('ascii')
+        return obj.encode("ascii", "ignore").decode("ascii")
     return ""
 
 
@@ -114,15 +114,18 @@ class LatexTagsToHTML(Preprocessor):
 
     regex = traits.Unicode(
         r"\\(?:[^a-zA-Z]|[a-zA-Z]+[*=']?)(?:\[.*?\])?{.*?}",
-        help="the regex to identify latex tags").tag(config=True)
+        help="the regex to identify latex tags",
+    ).tag(config=True)
     bibformat = traits.Unicode(
         "{author}, {year}.",
-        help="the format to output \\cite{} tags found in the bibliography"
+        help="the format to output \\cite{} tags found in the bibliography",
     ).tag(config=True)
     labelbycolon = traits.Bool(
         True,
-        help=('create reference label based on text before colon, '
-              'e.g. \\ref{fig:example} -> fig 1')
+        help=(
+            "create reference label based on text before colon, "
+            "e.g. \\ref{fig:example} -> fig 1"
+        ),
     ).tag(config=True)
 
     def __init__(self, *args, **kwargs):
@@ -138,11 +141,11 @@ def read_bibliography(path):
         """ read a bibliography
 
         """
-        logging.info('reading bibliopath: {}'.format(path))
+        logging.info("reading bibliopath: {}".format(path))
         bibdatabase = {}
         bibparser = bibtexparser.bparser.BibTexParser()
         try:
-            if hasattr(path, 'open'):
+            if hasattr(path, "open"):
                 with path.open(encoding="utf8") as bibtex_file:
                     bibtex_data = bibtex_file.read()
             else:
@@ -151,7 +154,7 @@ def read_bibliography(path):
             bibtex_data = safe_str(bibtex_data)
             bibdatabase = bibparser.parse(bibtex_data).entries_dict
         except Exception as err:
-            logging.error('could not read bibliopath {}: {}'.format(path, err))
+            logging.error("could not read bibliopath {}: {}".format(path, err))
 
         return bibdatabase
 
@@ -163,35 +166,34 @@ def process_bib_entry(self, entry):
         """work out the best way to represent the bib entry """
         # abbreviate a list of authors
-        if 'author' in entry:
-            authors = re.split(", | and ", entry['author'])
+        if "author" in entry:
+            authors = re.split(", | and ", entry["author"])
             if len(authors) > 1:
-                author = authors[0] + ' et al'
+                author = authors[0] + " et al"
             else:
                 author = authors[0]
-            entry['author'] = author
+            entry["author"] = author
 
         # split up date into year, month, day
-        if 'date' in entry:
-            date = entry['date'].split('-')
+        if "date" in entry:
+            date = entry["date"].split("-")
             if len(date) == 3:
-                entry['year'] = date[0]
-                entry['month'] = date[1]
-                entry['day'] = date[2]
+                entry["year"] = date[0]
+                entry["month"] = date[1]
+                entry["day"] = date[2]
             else:
-                entry['year'] = date[0]
+                entry["year"] = date[0]
 
         text = DefaultFormatter().format(self.bibformat, **entry)
 
-        if 'doi' in entry:
+        if "doi" in entry:
             return r'<a href="https://doi.org/{doi}">{text}</a>'.format(
-                doi=entry['doi'], text=text)
-        elif 'url' in entry:
-            return r'<a href="{url}">{text}</a>'.format(
-                url=entry['url'], text=text)
-        elif 'link' in entry:
-            return r'<a href="{url}">{text}</a>'.format(
-                url=entry['link'], text=text)
+                doi=entry["doi"], text=text
+            )
+        elif "url" in entry:
+            return r'<a href="{url}">{text}</a>'.format(url=entry["url"], text=text)
+        elif "link" in entry:
+            return r'<a href="{url}">{text}</a>'.format(url=entry["link"], text=text)
         else:
             return text
 
@@ -203,15 +205,16 @@ def replace_reflabel(self, name, resources):
         this is particularly useful for slides,
         which require a prefix #/