diff --git a/.gitignore b/.gitignore index 7058f358b2..e9f4ccc67c 100644 --- a/.gitignore +++ b/.gitignore @@ -18,7 +18,7 @@ tutorials/img docs/src/tutorials/*/ docs/src/tutorials/*.md docs/.CondaPkg -docs/src/tutorials/Optimize!_files +docs/src/tutorials/getstarted_files docs/src/tutorials/*.html docs/src/changelog.md docs/styles/Google diff --git a/.vale.ini b/.vale.ini index ff2bb0bce6..054a57d418 100644 --- a/.vale.ini +++ b/.vale.ini @@ -7,12 +7,16 @@ Packages = Google [formats] # code blocks with Julia in Markdown do not yet work well qmd = md +jl = md [docs/src/*.md] BasedOnStyles = Vale, Google [docs/src/contributing.md] -BasedOnStyles = +BasedOnStyles = Vale, Google +Google.Will = false ; in this format we really do intend a _will_ in a few places +Google.Headings = false ; some headings really do have [] in them +Google.FirstPerson = false ; we pose a few contribution points as first-person questions [Changelog.md, CONTRIBUTING.md] BasedOnStyles = Vale, Google @@ -39,12 +43,14 @@ TokenIgnores = \$(.+)\$,\[.+?\]\(@(ref|id|cite).+?\),`.+`,``.*``,\s{4}.+\n Google.Units = false #wto ignore formats= for now. TokenIgnores = \$(.+)\$,\[.+?\]\(@(ref|id|cite).+?\),`.+`,``.*``,\s{4}.+\n -[tutorials/*.md] ; actually .qmd for the first, second autogenerated +[tutorials/*.qmd] ; actually .qmd for the first, second autogenerated BasedOnStyles = Vale, Google ; ignore (1) math (2) ref and cite keys (3) code in docs (4) math in docs (5,6) indented blocks TokenIgnores = (\$+[^\n$]+\$+) Google.We = false # For tutorials we want to address the user directly. -[docs/src/tutorials/*.md] - ; ignore since they are derived files -BasedOnStyles = +[docs/src/tutorials/*.md] ; the tutorials rendered to Markdown, checked like their .qmd sources +BasedOnStyles = Vale, Google +; ignore (1) math (2) ref and cite keys (3) code in docs (4) math in docs (5,6) indented blocks +TokenIgnores = (\$+[^\n$]+\$+) +Google.We = false # For tutorials we want to address the user directly. \ No newline at end of file diff --git a/.zenodo.json b/.zenodo.json index d4c2bec6ee..7e5fd36fe7 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -25,6 +25,11 @@ "name": "Riemer, Tom-Christian", "type": "ProjectMember" }, + { + "affiliation": "NTNU Trondheim", + "name": "Oddsen, Sander Engen", + "type": "ProjectMember" + }, { "name": "Schilly, Harald", "type": "Other" diff --git a/Changelog.md b/Changelog.md index 2aab0343fd..d0dce06686 100644 --- a/Changelog.md +++ b/Changelog.md @@ -5,7 +5,13 @@ All notable Changes to the Julia package `Manopt.jl` will be documented in this The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [0.5.5] Januaey 4, 2025 +## [0.5.6] February 10, 2025 + +### Changed + +* bump dependencies of all JuliaManifolds ecosystem packages to be consistent with ManifoldsBase 1.0 + +## [0.5.5] January 4, 2025 ### Added @@ -122,7 +128,7 @@ In general we introduce a few factories, that avoid having to pass the manifold * the previous `stabilize=true` is now set with `(project!)=embed_project!` in general, and if the manifold is represented by points in the embedding, like the sphere, `(project!)=project!` suffices * the new default is `(project!)=copyto!`, so by default no projection/stabilization is performed.
-* the positional argument `p` (usually the last or the third to last if subsolvers existed) has been moved to a keyword argument `p=` in all State constructors +* the positional argument `p` (usually the last or the third to last if sub solvers existed) has been moved to a keyword argument `p=` in all State constructors * in `NelderMeadState` the `population` moved from positional to keyword argument as well, * the way to initialise sub solvers in the solver states has been unified In the new variant * the `sub_problem` is always a positional argument; namely the last one diff --git a/Project.toml b/Project.toml index 8a0f0ebd9c..979110531f 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Manopt" uuid = "0fc0a36d-df90-57f3-8f93-d78a9fc72bb5" authors = ["Ronny Bergmann "] -version = "0.5.5" +version = "0.5.6" [deps] ColorSchemes = "35d6a980-a343-548e-a6ea-1d62b119f2f4" @@ -51,7 +51,7 @@ LineSearches = "7.2.0" LinearAlgebra = "1.10" ManifoldDiff = "0.3.8, 0.4" Manifolds = "0.9.11, 0.10" -ManifoldsBase = "0.15.18" +ManifoldsBase = "0.15.18, 1.0" ManoptExamples = "0.1.10" Markdown = "1.10" Plots = "1.30" diff --git a/Readme.md b/Readme.md index 36c3733ca4..e28316e79f 100644 --- a/Readme.md +++ b/Readme.md @@ -36,7 +36,7 @@ In Julia you can get started by just typing using Pkg; Pkg.add("Manopt"); ``` -and then checkout the [Get started: optimize!](https://manoptjl.org/stable/tutorials/Optimize/) tutorial. +and then checkout the [πŸ”οΈ Get started with Manopt.jl](https://manoptjl.org/stable/tutorials/getstarted/) tutorial. ## Related packages diff --git a/docs/Project.toml b/docs/Project.toml index 69176555be..39adbb3279 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -40,9 +40,9 @@ JuMP = "1" LRUCache = "1" LineSearches = "7" Literate = "2" -Manifolds = "0.8.81, 0.9, 0.10" -ManifoldsBase = "0.14.12, 0.15" -Manopt = "0.4, 0.5" +Manifolds = "0.10" +ManifoldsBase = "1" +Manopt = "0.5" Plots = "1" QuadraticModels = "0.9.6" RecursiveArrayTools = "2, 3" diff --git a/docs/make.jl b/docs/make.jl index fb8fb85f27..d174383e1d 100755 --- a/docs/make.jl +++ b/docs/make.jl @@ -21,7 +21,7 @@ Arguments Then you can spare time in the rendering by not passing this argument. If quarto is not run, some tutorials are generated as empty files, since they are referenced from within the documentation. - These are currently `Optimize.md` and `ImplementOwnManifold.md`. + These are currently `getstarted.md` and `ImplementOwnManifold.md`. """ ) exit(0) @@ -35,7 +35,7 @@ tutorials_in_menu = !("--exclude-tutorials" ∈ ARGS) # (a) setup the tutorials menu – check whether all files exist tutorials_menu = "How to..." => [ - "πŸ”οΈ Get started: optimize." => "tutorials/Optimize.md", + "πŸ”οΈ Get started with Manopt.jl" => "tutorials/getstarted.md", "Speedup using in-place computations" => "tutorials/InplaceGradient.md", "Use automatic differentiation" => "tutorials/AutomaticDifferentiation.md", "Define objectives in the embedding" => "tutorials/EmbeddingObjectives.md", @@ -54,8 +54,9 @@ for (name, file) in tutorials_menu.second global all_tutorials_exist = false if !run_quarto @warn "Tutorial $name does not exist at $fn." - if (!isfile(fn)) && - (endswith(file, "Optimize.md") || endswith(file, "ImplementOwnManifold.md")) + if (!isfile(fn)) && ( + endswith(file, "getstarted.md") || endswith(file, "ImplementOwnManifold.md") + ) @warn "Generating empty file, since this tutorial is linked to from the documentation." 
touch(fn) end diff --git a/docs/src/about.md b/docs/src/about.md index 1ea018a27a..b0b1085c88 100644 --- a/docs/src/about.md +++ b/docs/src/about.md @@ -28,8 +28,8 @@ to clone/fork the repository or open an issue. * [ExponentialFamilyProjection.jl](https://github.com/ReactiveBayes/ExponentialFamilyProjection.jl) package uses `Manopt.jl` to project arbitrary functions onto the closest exponential family distributions. The package also integrates with [`RxInfer.jl`](https://github.com/ReactiveBayes/RxInfer.jl) to enable Bayesian inference in a larger set of probabilistic models. * [Caesar.jl](https://github.com/JuliaRobotics/Caesar.jl) within non-Gaussian factor graph inference algorithms -Is a package missing? [Open an issue](https://github.com/JuliaManifolds/Manopt.jl/issues/new)! -It would be great to collect anything and anyone using Manopt.jl +If you are missing a package that uses `Manopt.jl`, please [open an issue](https://github.com/JuliaManifolds/Manopt.jl/issues/new). +It would be great to collect anything and anyone using Manopt.jl in this list. ## Further packages diff --git a/docs/src/index.md b/docs/src/index.md index 1eb56a1430..b0cbf14fe4 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -20,7 +20,7 @@ or in other words: find the point ``p`` on the manifold, where ``f`` reaches its It belongs to the β€œManopt family”, which includes [Manopt](https://manopt.org) (Matlab) and [pymanopt.org](https://www.pymanopt.org/) (Python). If you want to delve right into `Manopt.jl` read the -[πŸ”οΈ Get started: optimize.](tutorials/Optimize.md) tutorial. +[πŸ”οΈ Get started with Manopt.jl](tutorials/getstarted.md) tutorial. `Manopt.jl` makes it easy to use an algorithm for your favourite manifold as well as a manifold for your favourite algorithm. It already provides @@ -94,7 +94,7 @@ The notation in the documentation aims to follow the same [notation](https://jul ### Visualization To visualize and interpret results, `Manopt.jl` aims to provide both easy plot functions as well as [exports](helpers/exports.md). Furthermore a system to get [debug](plans/debug.md) during the iterations of an algorithms as well as [record](plans/record.md) capabilities, for example to record a specified tuple of values per iteration, most prominently [`RecordCost`](@ref) and -[`RecordIterate`](@ref). Take a look at the [πŸ”οΈ Get started: optimize.](tutorials/Optimize.md) tutorial on how to easily activate this. +[`RecordIterate`](@ref). Take a look at the [πŸ”οΈ Get started with Manopt.jl](tutorials/getstarted.md) tutorial on how to easily activate this. ## Literature diff --git a/docs/src/solvers/index.md b/docs/src/solvers/index.md index 3227a8644f..c94649bfad 100644 --- a/docs/src/solvers/index.md +++ b/docs/src/solvers/index.md @@ -98,7 +98,7 @@ For these you can use * [Steihaug-Toint Truncated Conjugate-Gradient Method](truncated_conjugate_gradient_descent.md) a solver for a constrained problem defined on a tangent space. -## Alphabetical list List of algorithms +## Alphabetical list of algorithms | Solver | Function | State | |:---------|:----------------|:---------| @@ -202,7 +202,7 @@ also use the third (lowest level) and just call solve!(problem, state) ``` -### Closed-form subsolvers +### Closed-form sub solvers If a subsolver solution is available in closed form, `ClosedFormSubSolverState` is used to indicate that.
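Editor's note on the hunk above: the documentation mentions a highest-level call and a lowest-level `solve!(problem, state)` call. The following is a rough, self-contained sketch of both levels (illustrative only, not part of this patch; the manifold, cost, gradient, and start point are made up, while the constructors and `solve!` follow the Manopt.jl interface as documented in the changelog above, with `p=` as a keyword of the state constructor).

```julia
using Manopt, Manifolds

M = Sphere(2)                       # example manifold
q = [0.0, 0.0, 1.0]                 # a reference point
f(M, p) = distance(M, p, q)^2 / 2   # made-up cost: half the squared distance to q
grad_f(M, p) = -log(M, p, q)        # its Riemannian gradient

# high-level interface: one call returning the resulting point
p1 = gradient_descent(M, f, grad_f, [1.0, 0.0, 0.0])

# lowest-level interface: build a problem and a state explicitly, then solve!
obj = ManifoldGradientObjective(f, grad_f)
problem = DefaultManoptProblem(M, obj)
state = GradientDescentState(M; p=[1.0, 0.0, 0.0])
solve!(problem, state)
p2 = get_solver_result(state)

distance(M, p1, p2)                 # both runs approximate the minimizer q
```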
diff --git a/docs/src/tutorials/InplaceGradient.md b/docs/src/tutorials/InplaceGradient.md index 47f64f447f..a575ba3e82 100644 --- a/docs/src/tutorials/InplaceGradient.md +++ b/docs/src/tutorials/InplaceGradient.md @@ -5,7 +5,7 @@ When it comes to time critical operations, a main ingredient in Julia is given b mutating functions, that is those that compute in place without additional memory allocations. In the following, we illustrate how to do this with `Manopt.jl`. -Let’s start with the same function as in [Get started: optimize!](https://manoptjl.org/stable/tutorials/Optimize!.html) +Let’s start with the same function as in [πŸ”οΈ Get started with Manopt.jl](https://manoptjl.org/stable/tutorials/getstarted.html) and compute the mean of some points, only that here we use the sphere $\mathbb S^{30}$ and $n=800$ points. @@ -32,7 +32,7 @@ p[2] = 1.0 data = [exp(M, p, Οƒ * rand(M; vector_at=p)) for i in 1:n]; ``` -## Classical Definition +## Classical definition The variant from the previous tutorial defines a cost $f(x)$ and its gradient $\operatorname{grad}f(p)$ β€œβ€œβ€ @@ -58,18 +58,18 @@ We can also benchmark this as @benchmark gradient_descent($M, $f, $grad_f, $p0; stopping_criterion=$sc) ``` - BenchmarkTools.Trial: 106 samples with 1 evaluation. - Range (min … max): 46.774 ms … 50.326 ms β”Š GC (min … max): 2.31% … 2.47% - Time (median): 47.207 ms β”Š GC (median): 2.45% - Time (mean Β± Οƒ): 47.364 ms Β± 608.514 ΞΌs β”Š GC (mean Β± Οƒ): 2.53% Β± 0.25% + BenchmarkTools.Trial: 89 samples with 1 evaluation per sample. + Range (min … max): 52.976 ms … 104.222 ms β”Š GC (min … max): 8.05% … 5.55% + Time (median): 55.145 ms β”Š GC (median): 9.99% + Time (mean Β± Οƒ): 56.391 ms Β± 6.102 ms β”Š GC (mean Β± Οƒ): 9.92% Β± 1.43% - β–„β–‡β–…β–‡β–ˆβ–„β–‡ - β–…β–‡β–†β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‡β–‡β–…β–…β–ƒβ–β–†β–β–β–β–…β–β–β–…β–β–ƒβ–ƒβ–β–β–β–β–β–β–β–β–β–β–β–β–ƒβ–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–… β–ƒ - 46.8 ms Histogram: frequency by time 50.2 ms < + β–…β–ˆβ–ˆβ–…β–ƒβ– + β–…β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–β–…β–‡β–…β–β–…β–β–β–…β–…β–β–β–β–…β–…β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–… ▁ + 53 ms Histogram: log(frequency) by time 81.7 ms < - Memory estimate: 182.50 MiB, allocs estimate: 615822. + Memory estimate: 173.54 MiB, allocs estimate: 1167348. -## In-place Computation of the Gradient +## In-place computation of the gradient We can reduce the memory allocations by implementing the gradient to be evaluated in-place. We do this by using a [functor](https://docs.julialang.org/en/v1/manual/methods/#Function-like-objects). @@ -77,7 +77,7 @@ The motivation is twofold: on one hand, we want to avoid variables from the glob for example the manifold `M` or the `data`, being used within the function. Considering to do the same for more complicated cost functions might also be worth pursuing. -Here, we store the data (as reference) and one introduce temporary memory in order to avoid +Here, we store the data (as reference) and one introduce temporary memory to avoid reallocation of memory per `grad_distance` computation. We get ``` julia @@ -116,16 +116,16 @@ We can again benchmark this ) setup = (m2 = deepcopy($p0)) ``` - BenchmarkTools.Trial: 176 samples with 1 evaluation. 
- Range (min … max): 27.358 ms … 84.206 ms β”Š GC (min … max): 0.00% … 0.00% - Time (median): 27.768 ms β”Š GC (median): 0.00% - Time (mean Β± Οƒ): 28.504 ms Β± 4.338 ms β”Š GC (mean Β± Οƒ): 0.60% Β± 1.96% + BenchmarkTools.Trial: 130 samples with 1 evaluation per sample. + Range (min … max): 36.646 ms … 64.781 ms β”Š GC (min … max): 0.00% … 0.00% + Time (median): 37.559 ms β”Š GC (median): 0.00% + Time (mean Β± Οƒ): 38.658 ms Β± 3.904 ms β”Š GC (mean Β± Οƒ): 0.73% Β± 2.68% - β–‚β–ˆβ–‡β–‚ β–‚ - β–†β–‡β–ˆβ–ˆβ–ˆβ–ˆβ–†β–ˆβ–†β–†β–„β–„β–ƒβ–„β–„β–ƒβ–ƒβ–ƒβ–β–ƒβ–ƒβ–ƒβ–ƒβ–ƒβ–ƒβ–ƒβ–ƒβ–ƒβ–„β–ƒβ–ƒβ–ƒβ–ƒβ–ƒβ–ƒβ–β–ƒβ–β–β–ƒβ–β–β–β–β–β–β–ƒβ–ƒβ–β–β–ƒβ–ƒβ–β–β–β–β–ƒβ–ƒβ–ƒ β–ƒ - 27.4 ms Histogram: frequency by time 31.4 ms < + β–ˆβ–ˆβ–…β–…β–„β–‚β– β–‚ + β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–β–ˆβ–ˆβ–β–…β–β–β–β–…β–β–β–β–β–…β–…β–…β–β–β–β–…β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–β–…β–β–β–β–β–β–β–β–β–β–… β–… + 36.6 ms Histogram: log(frequency) by time 61 ms < - Memory estimate: 3.83 MiB, allocs estimate: 5797. + Memory estimate: 3.59 MiB, allocs estimate: 6863. which is faster by about a factor of 2 compared to the first solver-call. Note that the results `m1` and `m2` are of course the same. @@ -134,7 +134,7 @@ Note that the results `m1` and `m2` are of course the same. distance(M, m1, m2) ``` - 2.4669338186126805e-17 + 4.8317610992693745e-11 ## Technical details @@ -146,21 +146,24 @@ Pkg.status() ``` Status `~/Repositories/Julia/Manopt.jl/tutorials/Project.toml` - [6e4b80f9] BenchmarkTools v1.5.0 - [5ae59095] Colors v0.12.11 - [31c24e10] Distributions v0.25.108 - [26cc04aa] FiniteDifferences v0.12.31 - [7073ff75] IJulia v1.24.2 + [47edcb42] ADTypes v1.13.0 + [6e4b80f9] BenchmarkTools v1.6.0 + βŒƒ [5ae59095] Colors v0.12.11 + [31c24e10] Distributions v0.25.117 + [26cc04aa] FiniteDifferences v0.12.32 + [7073ff75] IJulia v1.26.0 [8ac3fa9e] LRUCache v1.6.1 - [af67fdf4] ManifoldDiff v0.3.10 - [1cead3c2] Manifolds v0.9.18 - [3362f125] ManifoldsBase v0.15.10 - [0fc0a36d] Manopt v0.4.63 `..` - [91a5bcdd] Plots v1.40.4 + [af67fdf4] ManifoldDiff v0.4.2 + [1cead3c2] Manifolds v0.10.13 + [3362f125] ManifoldsBase v1.0.1 + [0fc0a36d] Manopt v0.5.5 `..` + [91a5bcdd] Plots v1.40.9 + [731186ca] RecursiveArrayTools v3.29.0 + Info Packages marked with βŒƒ have new versions available and may be upgradable. ``` julia using Dates now() ``` - 2024-05-26T13:52:05.613 + 2025-02-10T13:22:51.002 diff --git a/docs/styles/config/vocabularies/Manopt/accept.txt b/docs/styles/config/vocabularies/Manopt/accept.txt index 75246fb123..73e5f1fef9 100644 --- a/docs/styles/config/vocabularies/Manopt/accept.txt +++ b/docs/styles/config/vocabularies/Manopt/accept.txt @@ -23,6 +23,7 @@ Cartis canonicalization canonicalized Constantin +[Cc]ubics Dai deactivatable Diepeveen @@ -76,9 +77,11 @@ Munkvold [Mm]ead [Nn]elder Nesterov +Nesterovs Newton nonmonotone nonpositive +[Nn]onsmooth [Pp]arametrising Parametrising [Pp]ock @@ -110,9 +113,11 @@ Stephansen Stokkenes [Ss]ubdifferential [Ss]ubgradient +[Ss]ubgradients subsampled [Ss]ubsolver summand +summands superlinear supertype th diff --git a/ext/ManoptLineSearchesExt.jl b/ext/ManoptLineSearchesExt.jl index 52d8d3c2dd..f1f2f2abba 100644 --- a/ext/ManoptLineSearchesExt.jl +++ b/ext/ManoptLineSearchesExt.jl @@ -39,7 +39,7 @@ function (cs::Manopt.LineSearchesStepsize)( end function Ο•dΟ•(Ξ±) # TODO: optimize? 
- retract!(M, p_tmp, p, Ξ·, Ξ±, cs.retraction_method) + ManifoldsBase.retract_fused!(M, p_tmp, p, Ξ·, Ξ±, cs.retraction_method) get_gradient!(mp, X_tmp, p_tmp) vector_transport_to!(M, Y_tmp, p, Ξ·, p_tmp, cs.vector_transport_method) phi = f(M, p_tmp) diff --git a/src/Manopt.jl b/src/Manopt.jl index 091f0accfc..e8d5e3a5b2 100644 --- a/src/Manopt.jl +++ b/src/Manopt.jl @@ -53,6 +53,7 @@ using ManifoldDiff: riemannian_gradient!, riemannian_Hessian, riemannian_Hessian! +using ManifoldsBase using ManifoldsBase: AbstractBasis, AbstractDecoratorManifold, diff --git a/src/helpers/checks.jl b/src/helpers/checks.jl index d771dee7f1..334449444b 100644 --- a/src/helpers/checks.jl +++ b/src/helpers/checks.jl @@ -50,7 +50,7 @@ function check_differential( # T = exp10.(log_range) # points `p_i` to evaluate the error function at - points = map(t -> retract(M, p, Xn, t, retraction_method), T) + points = map(t -> ManifoldsBase.retract_fused(M, p, Xn, t, retraction_method), T) costs = [F(M, pi) for pi in points] # linearized linearized = map(t -> F(M, p) + t * dF(M, p, Xn), T) @@ -297,7 +297,7 @@ function check_Hessian( # T = exp10.(log_range) # points `p_i` to evaluate error function at - points = map(t -> retract(M, p, X_n, t, retraction_method), T) + points = map(t -> ManifoldsBase.retract_fused(M, p, X_n, t, retraction_method), T) # corresponding costs costs = [f(M, pi) for pi in points] # linearized diff --git a/src/plans/debug.jl b/src/plans/debug.jl index 298eb1d2b2..1d205580d3 100644 --- a/src/plans/debug.jl +++ b/src/plans/debug.jl @@ -158,7 +158,7 @@ Whether internal variables are updates is determined by `always_update`. This method does not perform any print itself but relies on it's children's print. -It also sets the subsolvers active parameter, see |`DebugWhenActive`}(#ref). +It also sets the sub solvers' active parameter, see [`DebugWhenActive`](@ref). Here, the `activattion_offset` can be used to specify whether it refers to _this_ iteration, the `i`th, when this call is _before_ the iteration, then the offset should be 0, for the _next_ iteration, that is if this is called _after_ an iteration, it has to be set to 1. @@ -185,7 +185,7 @@ function (d::DebugEvery)(p::AbstractManoptProblem, st::AbstractManoptSolverState elseif d.always_update d.debug(p, st, -1) end - # set activity for this iterate in subsolvers + # set activity for this iterate in sub solvers set_parameter!( st, :SubState, diff --git a/src/plans/record.jl b/src/plans/record.jl index a8ccabdd04..585b0a234b 100644 --- a/src/plans/record.jl +++ b/src/plans/record.jl @@ -243,7 +243,7 @@ function (re::RecordEvery)( elseif re.always_update re.record(amp, ams, 0) end - # Set activity to activate or deactivate subsolvers + # Set activity to activate or deactivate sub solvers # note that since recording is happening at the end # sets activity for the _next_ iteration set_parameter!( @@ -390,7 +390,7 @@ getindex(r::RecordGroup, i) = get_record(r, i) @doc raw""" RecordSubsolver <: RecordAction -Record the current subsolvers recording, by calling [`get_record`](@ref) +Record the current sub solver's recording by calling [`get_record`](@ref) on the sub state with # Fields @@ -428,7 +428,7 @@ status_summary(::RecordSubsolver) = ":Subsolver" record action that only records if the `active` boolean is set to true. This can be set from outside and is for example triggered by |`RecordEvery`](@ref) on recordings of the subsolver.
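Editor's note: many hunks in this patch replace the fused five-argument `retract!(M, q, p, X, t, m)` with `ManifoldsBase.retract_fused!`, matching the ManifoldsBase 1.0 split of the two signatures. A rough sketch of the relation (illustrative only, not part of the patch; the manifold, point, and numbers are made up):

```julia
using Manifolds, ManifoldsBase

M = Sphere(2)
p = [1.0, 0.0, 0.0]
X = [0.0, 0.3, 0.4]           # a tangent vector at p
t = 0.5
m = ProjectionRetraction()

q1 = retract(M, p, t .* X, m)                    # scale the tangent vector, then retract
q2 = ManifoldsBase.retract_fused(M, p, X, t, m)  # the fused call this patch switches to
isapprox(M, q1, q2)                              # both yield the same point
```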
-While this is for subsolvers maybe not completely necessary, recording values that +While this is for sub solvers maybe not completely necessary, recording values that are never accessible, is not that useful. # Fields diff --git a/src/plans/stepsize.jl b/src/plans/stepsize.jl index a78a378104..845d7aaec2 100644 --- a/src/plans/stepsize.jl +++ b/src/plans/stepsize.jl @@ -730,7 +730,7 @@ function linesearch_backtrack!( stop_decreasing_at_step=1000, ) where {TF,T} msg = "" - retract!(M, q, p, Ξ·, s, retraction_method) + ManifoldsBase.retract_fused!(M, q, p, Ξ·, s, retraction_method) f_q = f(M, q) search_dir_inner = real(inner(M, p, Ξ·, X)) if search_dir_inner >= 0 @@ -742,7 +742,7 @@ function linesearch_backtrack!( (stop_increasing_at_step == 0) && break i = i + 1 s = s / contract - retract!(M, q, p, Ξ·, s, retraction_method) + ManifoldsBase.retract_fused!(M, q, p, Ξ·, s, retraction_method) f_q = f(M, q) if i == stop_increasing_at_step (length(msg) > 0) && (msg = "$msg\n") @@ -762,7 +762,7 @@ function linesearch_backtrack!( (!additional_decrease_condition(M, q)) i = i + 1 s = contract * s - retract!(M, q, p, Ξ·, s, retraction_method) + ManifoldsBase.retract_fused!(M, q, p, Ξ·, s, retraction_method) f_q = f(M, q) if i == stop_decreasing_at_step (length(msg) > 0) && (msg = "$msg\n") @@ -1294,7 +1294,7 @@ function (a::WolfePowellLinesearchStepsize)( s_minus = step f0 = get_cost(mp, p) - retract!(M, a.candidate_point, p, Ξ·, step, a.retraction_method) + ManifoldsBase.retract_fused!(M, a.candidate_point, p, Ξ·, step, a.retraction_method) fNew = get_cost(mp, a.candidate_point) vector_transport_to!( M, a.candidate_direction, p, Ξ·, a.candidate_point, a.vector_transport_method @@ -1303,7 +1303,9 @@ function (a::WolfePowellLinesearchStepsize)( while (fNew > f0 + a.sufficient_decrease * step * l) && (s_minus > 10^(-9)) # decrease s_minus = s_minus * 0.5 step = s_minus - retract!(M, a.candidate_point, p, Ξ·, step, a.retraction_method) + ManifoldsBase.retract_fused!( + M, a.candidate_point, p, Ξ·, step, a.retraction_method + ) fNew = get_cost(mp, a.candidate_point) end s_plus = 2.0 * s_minus @@ -1318,13 +1320,15 @@ function (a::WolfePowellLinesearchStepsize)( (s_plus < max_step_increase)# increase s_plus = s_plus * 2.0 step = s_plus - retract!(M, a.candidate_point, p, Ξ·, step, a.retraction_method) + ManifoldsBase.retract_fused!( + M, a.candidate_point, p, Ξ·, step, a.retraction_method + ) fNew = get_cost(mp, a.candidate_point) end s_minus = s_plus / 2.0 end end - retract!(M, a.candidate_point, p, Ξ·, s_minus, a.retraction_method) + ManifoldsBase.retract_fused!(M, a.candidate_point, p, Ξ·, s_minus, a.retraction_method) vector_transport_to!( M, a.candidate_direction, p, Ξ·, a.candidate_point, a.vector_transport_method ) @@ -1332,7 +1336,7 @@ function (a::WolfePowellLinesearchStepsize)( while real(inner(M, a.candidate_point, a.candidate_tangent, a.candidate_direction)) < a.sufficient_curvature * l step = (s_minus + s_plus) / 2 - retract!(M, a.candidate_point, p, Ξ·, step, a.retraction_method) + ManifoldsBase.retract_fused!(M, a.candidate_point, p, Ξ·, step, a.retraction_method) fNew = get_cost(mp, a.candidate_point) if fNew <= f0 + a.sufficient_decrease * step * l s_minus = step @@ -1340,7 +1344,9 @@ function (a::WolfePowellLinesearchStepsize)( s_plus = step end abs(s_plus - s_minus) <= a.stop_when_stepsize_less && break - retract!(M, a.candidate_point, p, Ξ·, s_minus, a.retraction_method) + ManifoldsBase.retract_fused!( + M, a.candidate_point, p, Ξ·, s_minus, a.retraction_method + ) 
vector_transport_to!( M, a.candidate_direction, p, Ξ·, a.candidate_point, a.vector_transport_method ) @@ -1468,7 +1474,7 @@ function (a::WolfePowellBinaryLinesearchStepsize)( Ξ² = Inf t = 1.0 f0 = get_cost(amp, get_iterate(ams)) - xNew = retract(M, get_iterate(ams), Ξ·, t, a.retraction_method) + xNew = ManifoldsBase.retract_fused(M, get_iterate(ams), Ξ·, t, a.retraction_method) fNew = get_cost(amp, xNew) Ξ·_xNew = vector_transport_to(M, get_iterate(ams), Ξ·, xNew, a.vector_transport_method) gradient_new = get_gradient(amp, xNew) @@ -1486,7 +1492,7 @@ function (a::WolfePowellBinaryLinesearchStepsize)( (!nAt && nWt) && (Ξ± = t) # A(t) holds but W(t) fails t = isinf(Ξ²) ? 2 * Ξ± : (Ξ± + Ξ²) / 2 # Update trial point - retract!(M, xNew, get_iterate(ams), Ξ·, t, a.retraction_method) + ManifoldsBase.retract_fused!(M, xNew, get_iterate(ams), Ξ·, t, a.retraction_method) fNew = get_cost(amp, xNew) gradient_new = get_gradient(amp, xNew) vector_transport_to!( diff --git a/src/solvers/DouglasRachford.jl b/src/solvers/DouglasRachford.jl index 871c16eed3..80fd52d930 100644 --- a/src/solvers/DouglasRachford.jl +++ b/src/solvers/DouglasRachford.jl @@ -360,7 +360,7 @@ function step_solver!(amp::AbstractManoptProblem, drs::DouglasRachfordState, k) get_proximal_map!(amp, drs.p, drs.Ξ»(k), drs.s_tmp, 2) _reflect!(M, drs.s_tmp, drs.p, drs.s_tmp, drs.R, drs.reflection_evaluation) # relaxation - drs.s = retract( + drs.s = ManifoldsBase.retract_fused( M, drs.s, inverse_retract(M, drs.s, drs.s_tmp, drs.inverse_retraction_method), diff --git a/src/solvers/NelderMead.jl b/src/solvers/NelderMead.jl index 500fc3c3cb..b63f9a3290 100644 --- a/src/solvers/NelderMead.jl +++ b/src/solvers/NelderMead.jl @@ -349,7 +349,7 @@ function step_solver!(mp::AbstractManoptProblem, s::NelderMeadState, ::Any) # --- Shrink --- if continue_steps for i in 2:length(ind) - retract!( + ManifoldsBase.retract_fused!( M, s.population.pts[i], s.population.pts[1], diff --git a/src/solvers/augmented_Lagrangian_method.jl b/src/solvers/augmented_Lagrangian_method.jl index d26006f916..76e64a3596 100644 --- a/src/solvers/augmented_Lagrangian_method.jl +++ b/src/solvers/augmented_Lagrangian_method.jl @@ -473,7 +473,7 @@ function augmented_Lagrangian_method!( ), sub_problem::AbstractManoptProblem=DefaultManoptProblem( M, - # pass down objective type to subsolvers + # pass down objective type to sub solvers decorate_objective!( M, ManifoldGradientObjective(sub_cost, sub_grad; evaluation=evaluation); diff --git a/src/solvers/cma_es.jl b/src/solvers/cma_es.jl index c481ca6c12..7b4d945877 100644 --- a/src/solvers/cma_es.jl +++ b/src/solvers/cma_es.jl @@ -260,7 +260,9 @@ function step_solver!(mp::AbstractManoptProblem, s::CMAESState, iteration::Int) s.buffer .*= s.deviations # Eqs. (38) and (39) mul!(s.ys_c[i], B, s.buffer) # Eqs. (38) and (39) get_vector!(M, Y_m, s.p_m, s.ys_c[i], s.basis) # Eqs. (38) and (39) - retract!(M, s.population[i], s.p_m, Y_m, s.Οƒ, s.retraction_method) # Eq. (40) + ManifoldsBase.retract_fused!( + M, s.population[i], s.p_m, Y_m, s.Οƒ, s.retraction_method + ) # Eq. (40) end fitness_vals = map(p -> get_cost(mp, p), s.population) s.best_fitness_current_gen, s.worst_fitness_current_gen = extrema(fitness_vals) @@ -280,7 +282,7 @@ function step_solver!(mp::AbstractManoptProblem, s::CMAESState, iteration::Int) for i in 1:(s.ΞΌ) # Eq. 
(41) s.buffer .+= s.recombination_weights[i] .* ys_c_sorted[i] end - new_m = retract( + new_m = ManifoldsBase.retract_fused( M, s.p_m, get_vector(M, s.p_m, s.buffer, s.basis), s.c_m * s.Οƒ, s.retraction_method ) # Eq. (42) diff --git a/src/solvers/conjugate_gradient_descent.jl b/src/solvers/conjugate_gradient_descent.jl index 2ecc146fa1..54376f398f 100644 --- a/src/solvers/conjugate_gradient_descent.jl +++ b/src/solvers/conjugate_gradient_descent.jl @@ -173,7 +173,9 @@ function step_solver!(amp::AbstractManoptProblem, cgs::ConjugateGradientDescentS M = get_manifold(amp) copyto!(M, cgs.p_old, cgs.p) current_stepsize = get_stepsize(amp, cgs, k, cgs.Ξ΄) - retract!(M, cgs.p, cgs.p, cgs.Ξ΄, current_stepsize, cgs.retraction_method) + ManifoldsBase.retract_fused!( + M, cgs.p, cgs.p, cgs.Ξ΄, current_stepsize, cgs.retraction_method + ) get_gradient!(amp, cgs.X, cgs.p) cgs.Ξ² = cgs.coefficient(amp, cgs, k) vector_transport_to!(M, cgs.Ξ΄, cgs.p_old, cgs.Ξ΄, cgs.p, cgs.vector_transport_method) diff --git a/src/solvers/convex_bundle_method.jl b/src/solvers/convex_bundle_method.jl index 4a19e036f2..99ebf64373 100644 --- a/src/solvers/convex_bundle_method.jl +++ b/src/solvers/convex_bundle_method.jl @@ -474,7 +474,7 @@ end # # -# Dispatching on different types of subsolvers +# Dispatching on different types of sub solvers # (a) closed form allocating function _convex_bundle_subsolver!( M, bms::ConvexBundleMethodState{P,T,F,ClosedFormSubSolverState{AllocatingEvaluation}} diff --git a/src/solvers/gradient_descent.jl b/src/solvers/gradient_descent.jl index 66f36764fd..9cc434591a 100644 --- a/src/solvers/gradient_descent.jl +++ b/src/solvers/gradient_descent.jl @@ -246,6 +246,6 @@ function initialize_solver!(mp::AbstractManoptProblem, s::GradientDescentState) end function step_solver!(p::AbstractManoptProblem, s::GradientDescentState, k) step, s.X = s.direction(p, s, k) - retract!(get_manifold(p), s.p, s.p, s.X, -step, s.retraction_method) + ManifoldsBase.retract_fused!(get_manifold(p), s.p, s.p, s.X, -step, s.retraction_method) return s end diff --git a/src/solvers/proximal_bundle_method.jl b/src/solvers/proximal_bundle_method.jl index 583705ce6c..10773c916c 100644 --- a/src/solvers/proximal_bundle_method.jl +++ b/src/solvers/proximal_bundle_method.jl @@ -432,7 +432,7 @@ get_solver_result(pbms::ProximalBundleMethodState) = pbms.p_last_serious # # -# Dispatching on different types of subsolvers +# Dispatching on different types of sub solvers # (a) closed form allocating function _proximal_bundle_subsolver!( M, pbms::ProximalBundleMethodState{P,T,F,ClosedFormSubSolverState{AllocatingEvaluation}} diff --git a/src/solvers/quasi_Newton.jl b/src/solvers/quasi_Newton.jl index 10240416b8..957e933cde 100644 --- a/src/solvers/quasi_Newton.jl +++ b/src/solvers/quasi_Newton.jl @@ -366,7 +366,7 @@ function step_solver!(mp::AbstractManoptProblem, qns::QuasiNewtonState, k) end Ξ± = qns.stepsize(mp, qns, k, qns.Ξ·) copyto!(M, qns.p_old, get_iterate(qns)) - retract!(M, qns.p, qns.p, qns.Ξ·, Ξ±, qns.retraction_method) + ManifoldsBase.retract_fused!(M, qns.p, qns.p, qns.Ξ·, Ξ±, qns.retraction_method) qns.Ξ· .*= Ξ± # qns.yk update fails if Ξ± is equal to 0 because then Ξ² is NaN Ξ² = ifelse( diff --git a/test/solvers/test_difference_of_convex.jl b/test/solvers/test_difference_of_convex.jl index 99e6231516..f405552d36 100644 --- a/test/solvers/test_difference_of_convex.jl +++ b/test/solvers/test_difference_of_convex.jl @@ -192,7 +192,7 @@ import Manifolds: inner ) end @testset "Running the closed form solution solvers" begin - 
# make them a bit by providing subsolvers as functions + # make them a bit by providing sub solvers as functions function dca_sub(M, p, X) q = copy(M, p) lin_s = LinearizedDCCost(g, copy(M, p), copy(M, p, X)) @@ -216,7 +216,7 @@ import Manifolds: inner @test isapprox(M, p11, p12) @test f(M, p11) β‰ˆ 0.0 atol = 1e-15 - # fake them a bit by providing subsolvers as functions + # fake them a bit by providing sub solvers as functions function prox_g(M, Ξ», p) q = copy(M, p) prox = ProximalDCCost(g, copy(M, p), Ξ») diff --git a/tutorials/AutomaticDifferentiation.qmd b/tutorials/AutomaticDifferentiation.qmd index fe5da3abc9..7056c84d7b 100644 --- a/tutorials/AutomaticDifferentiation.qmd +++ b/tutorials/AutomaticDifferentiation.qmd @@ -28,7 +28,7 @@ First, load all necessary packages ```{julia} using Manopt, Manifolds, Random, LinearAlgebra -using FiniteDifferences, ManifoldDiff +using FiniteDifferences, ManifoldDiff, ADTypes Random.seed!(42); ``` @@ -109,7 +109,7 @@ Manifolds provides a finite difference scheme in tangent spaces, that you can in ```{julia} r_backend = ManifoldDiff.TangentDiffBackend( - ManifoldDiff.FiniteDifferencesBackend() + AutoFiniteDifferences(central_fdm(5, 1)) ) gradf1_FD(p) = ManifoldDiff.gradient(M, f1, p, r_backend) @@ -161,9 +161,8 @@ The gradient is now computed combining our gradient scheme with FiniteDifference ```{julia} function grad_f2_AD(M, p) - return Manifolds.gradient( - M, F, p, Manifolds.RiemannianProjectionBackend(ManifoldDiff.FiniteDifferencesBackend()) - ) + b = Manifolds.RiemannianProjectionBackend(AutoFiniteDifferences(central_fdm(5, 1))) + return Manifolds.gradient(M, F, p, b) end X3 = grad_f2_AD(M, p) norm(M, p, X1 - X3) @@ -212,7 +211,10 @@ We define this here again as a function `grad_G_FD` that could be used in the `M ```{julia} function grad_G_FD(N, q) return Manifolds.gradient( - N, G, q, ManifoldDiff.RiemannianProjectionBackend(ManifoldDiff.FiniteDifferencesBackend()) + N, + G, + q, + ManifoldDiff.RiemannianProjectionBackend(AutoFiniteDifferences(central_fdm(5, 1))), ) end G1 = grad_G_FD(N, q) diff --git a/tutorials/CountAndCache.qmd b/tutorials/CountAndCache.qmd index 4e80cf5cbf..6f1f426178 100644 --- a/tutorials/CountAndCache.qmd +++ b/tutorials/CountAndCache.qmd @@ -5,7 +5,7 @@ author: Ronny Bergmann In this tutorial, we want to investigate the caching and counting (statistics) features of [Manopt.jl](https://manoptjl.org). We reuse the optimization tasks from the -introductory tutorial [Get started: optimize!](https://manoptjl.org/stable/tutorials/Optimize!.html). +introductory tutorial [πŸ”οΈ Get started with Manopt.jl](getstarted.md). ## Introduction @@ -67,7 +67,7 @@ using ManifoldDiff: grad_distance ## Counting -We first define our task, the Riemannian Center of Mass from the [Get started: optimize!](https://manoptjl.org/stable/tutorials/Optimize!.html) tutorial. +We first define our task, the Riemannian Center of Mass from the [πŸ”οΈ Get started with Manopt.jl](getstarted.md) tutorial. ```{julia} n = 100 diff --git a/tutorials/EmbeddingObjectives.qmd b/tutorials/EmbeddingObjectives.qmd index a3b7112a2e..cfdcdaee47 100644 --- a/tutorials/EmbeddingObjectives.qmd +++ b/tutorials/EmbeddingObjectives.qmd @@ -181,7 +181,7 @@ distance(M, q1, q2) This conversion also works for the gradients of constraints, and is passed down to -subsolvers by default when these are created using the Euclidean objective $f$, $\nabla f$ and $\nabla^2 f$. 
+sub solvers by default when these are created using the Euclidean objective $f$, $\nabla f$ and $\nabla^2 f$. ## Summary diff --git a/tutorials/HowToDebug.qmd b/tutorials/HowToDebug.qmd index 9dcc6b1297..cfc22609b1 100644 --- a/tutorials/HowToDebug.qmd +++ b/tutorials/HowToDebug.qmd @@ -106,7 +106,7 @@ Note that the number (`25`) yields that all but `:Start` and `:Stop` are only di ## Subsolver debug -Subsolvers have a `sub_kwargs` keyword, such that you can pass keywords to the sub solver as well. This works well if you do not plan to change the subsolver. If you do you can wrap your own `solver_state=` argument in a [`decorate_state!`](@ref) and pass a `debug=` password to this function call. +Sub solvers have a `sub_kwargs` keyword, such that you can pass keywords to the sub solver as well. This works well if you do not plan to change the subsolver. If you do, you can wrap your own `solver_state=` argument in a [`decorate_state!`](@ref) and pass a `debug=` keyword to this function call. Keywords in a keyword have to be passed as pairs (`:debug => [...]`). For most debugs, there further exists a longer form to specify the format to print. @@ -123,10 +123,10 @@ p3 = exact_penalty_method( ); ``` -The different lengths of the dotted lines come from the fact that ---at least in the beginning--- the subsolver performs a few steps and each subsolvers step prints a dot. +The different lengths of the dotted lines come from the fact that ---at least in the beginning--- the subsolver performs a few steps and each sub solver step prints a dot. -For this issue, there is the next symbol (similar to the `:Stop`) to indicate that a debug set is a subsolver set `:WhenActive`, which introduces a [`DebugWhenActive`](@ref) that is only activated when the outer debug is actually active, or inother words [`DebugEvery`](@ref) is active itself. -Furthermore, we want to print the iteration number _before_ printing the subsolvers steps, so we put this into a `Pair`, but we can leave the remaining ones as single +For this issue, there is the next symbol (similar to the `:Stop`) to indicate that a debug set is a subsolver set `:WhenActive`, which introduces a [`DebugWhenActive`](@ref) that is only activated when the outer debug is actually active, or in other words [`DebugEvery`](@ref) is active itself. +Furthermore, we want to print the iteration number _before_ printing the sub solver steps, so we put this into a `Pair`, but we can leave the remaining ones as single entries. Finally we also prefix `:Stop` with `" | "` and print the iteration number at the time we stop. We get diff --git a/tutorials/HowToRecord.qmd b/tutorials/HowToRecord.qmd index c396dddf34..8a62a92606 100644 --- a/tutorials/HowToRecord.qmd +++ b/tutorials/HowToRecord.qmd @@ -11,11 +11,11 @@ This tutorial illustrates how to: * record within a subsolver * define an own `RecordAction` to perform individual recordings. -Several predefined recordings exist, for example [`RecordCost`](https://manoptjl.org/stable/plans/record/#Manopt.RecordCost) or [`RecordGradient`](https://manoptjl.org/stable/solvers/gradient_descent/#Manopt.RecordGradient), if the problem the solver uses provides a gradient. -For fields of the `State` the recording can also be done [`RecordEntry`](https://manoptjl.org/stable/plans/record/#Manopt.RecordEvery). +Several predefined recordings exist, for example [`RecordCost`](@ref) or [`RecordGradient`](https://manoptjl.org/stable/solvers/gradient_descent/#Manopt.RecordGradient), if the problem the solver uses provides a gradient.
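Editor's note on the recording keywords named above: a minimal, self-contained sketch of how such recordings are requested and read back (illustrative only, not part of the patch; the manifold, cost, and start point are made up, while `record=`, `return_state=`, and `get_record` are the Manopt.jl interface this tutorial describes).

```julia
using Manopt, Manifolds

M = Euclidean(2)
f(M, p) = sum(p .^ 2) / 2      # made-up cost
grad_f(M, p) = p               # its gradient

# ask for the solver state back and record iteration number and cost
st = gradient_descent(M, f, grad_f, [2.0, 1.0];
    record=[:Iteration, :Cost], return_state=true)
get_record(st)                 # one (iteration, cost) tuple per iteration
```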
+For fields of the `State` the recording can also be done with [`RecordEntry`](@ref). For other recordings, for example more advanced computations before storing a value, an own `RecordAction` can be defined. -We illustrate these using the gradient descent from the [Get started: optimize!](https://manoptjl.org/stable/tutorials/Optimize!.html) tutorial. +We illustrate these using the gradient descent from the [πŸ”οΈ Get started with Manopt.jl](https://manoptjl.org/stable/tutorials/getstarted.html) tutorial. Here the focus is put on ways to investigate the behaviour during iterations by using Recording techniques. @@ -204,9 +204,9 @@ grad_g(M, p) = [project(M, p, mI[:, i]) for i in 1:d] p0 = project(M2, [ones(2)..., zeros(d - 3)..., 0.1]) ``` -We directly start with recording the subsolvers Iteration. +We directly start with recording the sub solver's Iteration. We can specify what to record in the subsolver using the `sub_kwargs` -keyword argument with a `Symbol => value` pair. Here we specify to record the iteration and the cost in every subsolvers step. +keyword argument with a `Symbol => value` pair. Here we specify to record the iteration and the cost in every sub solver step. Furthermore, we have to β€œcollect” this recording after every sub solver run. This is done with the `:Subsolver` keyword in the main `record=` keyword. @@ -273,7 +273,7 @@ s3 = exact_penalty_method( ); ``` -Then the following displays also the reasons why each of the recorded subsolvers stopped and the corresponding cost +Then the following also displays the reasons why each of the recorded sub solvers stopped and the corresponding cost ```{julia} get_record(s3) diff --git a/tutorials/ImplementASolver.qmd b/tutorials/ImplementASolver.qmd index 51e0840cb5..ce89a6b6b8 100644 --- a/tutorials/ImplementASolver.qmd +++ b/tutorials/ImplementASolver.qmd @@ -4,7 +4,7 @@ author: "Ronny Bergmann" --- When you have used a few solvers from `Manopt.jl` for example like in the opening -tutorial [Get started: optimize!](https://manoptjl.org/stable/tutorials/Optimize!.html) +tutorial [πŸ”οΈ Get started with Manopt.jl](https://manoptjl.org/stable/tutorials/getstarted.html) you might come to the idea of implementing a solver yourself. After a short introduction of the algorithm we aim to implement, @@ -204,7 +204,7 @@ In practice, however, it is preferable to cache intermediate values like cost of Now we can just run the solver already. We take the same example as for the other tutorials -We first define our task, the Riemannian Center of Mass from the [Get started: optimize!](https://manoptjl.org/stable/tutorials/Optimize!.html) tutorial. +We first define our task, the Riemannian Center of Mass from the [πŸ”οΈ Get started with Manopt.jl](https://manoptjl.org/stable/tutorials/getstarted.html) tutorial.
```{julia} #| output: false diff --git a/tutorials/ImplementOwnManifold.qmd b/tutorials/ImplementOwnManifold.qmd index 93fc77ae8e..3c7d5c69bf 100644 --- a/tutorials/ImplementOwnManifold.qmd +++ b/tutorials/ImplementOwnManifold.qmd @@ -10,7 +10,7 @@ CurrentModule = Manopt ```` When you have used a few solvers from [`Manopt.jl`](https://manoptjl.org/) for example like in the opening -tutorial [πŸ”οΈ Get started: optimize!](https://manoptjl.org/stable/tutorials/Optimize!.html) +tutorial [πŸ”οΈ Get started with Manopt.jl](getstarted.md) and also familiarized yourself with how to work with manifolds in general at [πŸš€ Get Started with `Manifolds.jl`](https://juliamanifolds.github.io/Manifolds.jl/stable/tutorials/getstarted.html), you might come across the point that you want to @@ -80,7 +80,7 @@ struct ScaledSphere <: AbstractManifold{ℝ} end ``` -We would like to compute a mean and/or median similar to [πŸ”οΈ Get started: optimize!](https://manoptjl.org/stable/tutorials/Optimize!.html). +We would like to compute a mean and/or median similar to [πŸ”οΈ Get started with Manopt.jl](https://manoptjl.org/stable/tutorials/getstarted.html). For given a set of points $q_1,\ldots,q_n$ we want to compute [Karcher:1977](@cite) ```math @@ -154,8 +154,8 @@ To be precise, we have to implement the ``[in-place variant](@extref ManifoldsBa ```{julia} import ManifoldsBase: retract_project! -function retract_project!(M::ScaledSphere, q, p, X, t::Number) - q .= p .+ t .* X +function retract_project!(M::ScaledSphere, q, p, X) + q .= p .+ X q .*= M.radius / norm(q) return q end diff --git a/tutorials/InplaceGradient.qmd b/tutorials/InplaceGradient.qmd index cb63b1dc4a..04120bad6c 100644 --- a/tutorials/InplaceGradient.qmd +++ b/tutorials/InplaceGradient.qmd @@ -7,7 +7,7 @@ When it comes to time critical operations, a main ingredient in Julia is given b mutating functions, that is those that compute in place without additional memory allocations. In the following, we illustrate how to do this with `Manopt.jl`. -Let's start with the same function as in [Get started: optimize!](https://manoptjl.org/stable/tutorials/Optimize!.html) +Let's start with the same function as in [πŸ”οΈ Get started with Manopt.jl](getstarted.md) and compute the mean of some points, only that here we use the sphere $\mathbb S^{30}$ and $n=800$ points.
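Editor's note: for readers skimming the patch, the in-place gradient pattern that the InplaceGradient tutorial builds towards looks roughly like the following sketch (simplified, with made-up sizes and names such as `GradF!`; not taken verbatim from the tutorial).

```julia
using Manopt, Manifolds, Random
Random.seed!(42)

n = 100                                   # smaller than the tutorial's n = 800
M = Sphere(30)
p = zeros(31); p[1] = 1.0
data = [exp(M, p, 0.4 .* rand(M; vector_at=p)) for _ in 1:n]

f(M, q) = sum(distance(M, q, d)^2 for d in data) / (2n)

# functor storing the data and one temporary tangent vector,
# so every gradient evaluation reuses memory instead of allocating
struct GradF!{TD,TV}
    data::TD
    tmp::TV
end
function (g!::GradF!)(M, X, q)
    fill!(X, 0)
    for d in g!.data
        log!(M, g!.tmp, q, d)             # tmp = log_q(d)
        X .-= g!.tmp ./ length(g!.data)
    end
    return X
end

grad_f! = GradF!(data, zero_vector(M, p))
m = gradient_descent(M, f, grad_f!, copy(data[1]); evaluation=InplaceEvaluation())
```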
diff --git a/tutorials/Project.toml b/tutorials/Project.toml index 1bd38bffc2..40a9e920d9 100644 --- a/tutorials/Project.toml +++ b/tutorials/Project.toml @@ -1,4 +1,5 @@ [deps] +ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" Colors = "5ae59095-9a9b-59fe-a467-6f913c188581" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" @@ -19,9 +20,9 @@ Distributions = "0.25" FiniteDifferences = "0.12" IJulia = "1" LRUCache = "1.4" -ManifoldDiff = "0.3.9" -Manifolds = "0.8.81, 0.9, 0.10" -ManifoldsBase = "0.14.12, 0.15" +ManifoldDiff = "0.4" +Manifolds = "0.10" +ManifoldsBase = "1" Manopt = "0.5" Plots = "1.38" RecursiveArrayTools = "2, 3" diff --git a/tutorials/_quarto.yml b/tutorials/_quarto.yml index 067511d23b..b87d242828 100644 --- a/tutorials/_quarto.yml +++ b/tutorials/_quarto.yml @@ -3,7 +3,6 @@ project: output-dir: ../docs/src/tutorials render: - "*.qmd" - - "!GeodesicRegression.qmd" - "!InplaceGradient.qmd" crossref: diff --git a/tutorials/Optimize.qmd b/tutorials/getstarted.qmd similarity index 98% rename from tutorials/Optimize.qmd rename to tutorials/getstarted.qmd index 3a155ed82f..923db2e09f 100644 --- a/tutorials/Optimize.qmd +++ b/tutorials/getstarted.qmd @@ -1,5 +1,5 @@ --- -title: "πŸ”οΈ Get started: optimize." +title: "πŸ”οΈ Get started with Manopt.jl" author: Ronny Bergmann --- @@ -20,7 +20,7 @@ This can also be written as ``` where the aim is to compute the minimizer $p^*$ numerically. -As an example, consider the generalisation of the [(arithemtic) mean](https://en.wikipedia.org/wiki/Arithmetic_mean). +As an example, consider the generalisation of the [(arithmetic) mean](https://en.wikipedia.org/wiki/Arithmetic_mean). In the Euclidean case with $d∈\mathbb N$, that is for $n∈\mathbb N$ data points $y_1,\ldots,y_n ∈ ℝ^d$ the mean ```math @@ -300,7 +300,7 @@ now() ````{=commonmark} ```@bibliography -Pages = ["Optimize.md"] +Pages = ["getstarted.md"] Canonical=false ``` ```` \ No newline at end of file
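Editor's note to close: the renamed get-started tutorial above centers on the generalisation of the arithmetic mean it introduces. A compact sketch of that computation (illustrative values only, not taken from the tutorial):

```julia
using Manopt, Manifolds, Random
Random.seed!(42)

M = Sphere(2)
n = 100
data = [rand(M) for _ in 1:n]             # made-up data points on the sphere

# the generalised (arithmetic) mean: minimise the sum of squared distances
f(M, p) = sum(distance(M, p, q)^2 for q in data) / (2n)
grad_f(M, p) = -sum(log(M, p, q) for q in data) / n

m = gradient_descent(M, f, grad_f, first(data))
f(M, m) <= f(M, first(data))              # the solver decreased the cost
```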