diff --git a/package-lock.json b/package-lock.json index 2747d67..70ae2ab 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { "name": "ml-levenberg-marquardt", - "version": "1.0.3", + "version": "1.1.0", "lockfileVersion": 1, "requires": true, "dependencies": { @@ -4692,6 +4692,11 @@ "which": "^1.3.0" } }, + "norm-dist": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/norm-dist/-/norm-dist-2.0.1.tgz", + "integrity": "sha512-7EQCgU3E8kvbb97QsFlj7fWLnPtHB10xCisJ+SbUrKGZUWCUugTi1q8YOC8U/ln1xiLbNiEwHPKYkT6A+Ja/jw==" + }, "normalize-package-data": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.4.0.tgz", diff --git a/package.json b/package.json index a4879d8..b5e43bb 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "ml-levenberg-marquardt", - "version": "1.0.3", + "version": "1.1.0", "description": "Curve fitting method in javascript", "main": "./lib/index.js", "module": "./src/index.js", @@ -12,7 +12,7 @@ "scripts": { "eslint": "eslint src", "eslint-fix": "npm run eslint -- --fix", - "prepare": "rollup -c", + "prepare": "rollup -c", "test": "npm run test-only && npm run eslint", "test-coverage": "npm run test-only -- --coverage", "test-only": "jest", @@ -49,6 +49,10 @@ "rollup": "^0.66.6" }, "dependencies": { - "ml-matrix": "^5.2.0" + "ml-matrix": "^5.2.0", + "norm-dist": "^2.0.1" + }, + "jest": { + "testURL": "http://localhost" } } diff --git a/src/__tests__/curve.js b/src/__tests__/curve.js index 2b018df..41aacbe 100644 --- a/src/__tests__/curve.js +++ b/src/__tests__/curve.js @@ -43,8 +43,8 @@ test('bennet5 problem', () => { damping: 0.00001, maxIterations: 1000, errorTolerance: 1e-3, - maxBound: [11, 11, 11], - minBound: [1, 1, 1], + maxValues: [11, 11, 11], + minValues: [1, 1, 1], initialValues: [3.5, 3.8, 4] }; @@ -62,7 +62,7 @@ test('fourParamEq', () => { expect(levenbergMarquardt(data, fourParamEq, options)).toBeDeepCloseTo( { 
iterations: 200, - parameterError: 374.6448, + residuals: 16398.0009, parameterValues: [-16.7697, 43.4549, 1018.8938, -4.3514] }, 3 @@ -78,8 +78,8 @@ test('error is NaN', () => { expect(levenbergMarquardt(data, fourParamEq, options)).toBeDeepCloseTo( { - iterations: 0, - parameterError: NaN, + iterations: 10, + residuals: NaN, parameterValues: [-64.298, 117.4022, -47.0851, -0.06148] }, 3 diff --git a/src/__tests__/errorCalculation.js b/src/__tests__/errorCalculation.js index fa9d4f0..1eccc9d 100644 --- a/src/__tests__/errorCalculation.js +++ b/src/__tests__/errorCalculation.js @@ -1,10 +1,10 @@ -import errorCalculation from '../errorCalculation'; +import * as errCalc from '../errorCalculation'; function sinFunction([a, b]) { return (t) => a * Math.sin(b * t); } -describe('errorCalculation test', () => { +describe('sum of residuals', () => { it('Simple case', () => { const len = 20; let data = { @@ -17,7 +17,32 @@ describe('errorCalculation test', () => { data.y[i] = sampleFunction(i); } - expect(errorCalculation(data, [2, 2], sinFunction)).toBeCloseTo(0, 3); - expect(errorCalculation(data, [4, 4], sinFunction)).toBeCloseTo(48.7, 1); + expect(errCalc.sumOfResiduals(data, [2, 2], sinFunction)).toBeCloseTo(0, 5); + expect(errCalc.sumOfResiduals(data, [4, 4], sinFunction)).toBeCloseTo(48.7, 1); + + expect(errCalc.sumOfSquaredResiduals(data, [2, 2], sinFunction)).toBeCloseTo(0, 5); + expect(errCalc.sumOfSquaredResiduals(data, [4, 4], sinFunction)).toBeCloseTo(165.6, 1); + }); +}); + +describe('error propagation estimator', () => { + it('Simple case', () => { + const q = Math.PI / 2; + + errCalc.initializeErrorPropagation(10); + expect(errCalc.errorPropagation(sinFunction([4, 4]), 0, 0.001)).toBeCloseTo(0.016, 5); + expect(errCalc.errorPropagation(sinFunction([1, 1]), q, 0.010)).toBeCloseTo(0.000, 5); + + errCalc.initializeErrorPropagation(50); + expect(errCalc.errorPropagation(sinFunction([4, 4]), 0, 0.25)).toBeCloseTo(2.568135, 1); + + 
errCalc.initializeErrorPropagation(100); + expect(errCalc.errorPropagation(sinFunction([4, 4]), 0, 0.25)).toBeCloseTo(2.568135, 2); + + errCalc.initializeErrorPropagation(1000); + expect(errCalc.errorPropagation(sinFunction([4, 4]), 0, 0.25)).toBeCloseTo(2.568135, 4); + + errCalc.initializeErrorPropagation(10000); + expect(errCalc.errorPropagation(sinFunction([4, 4]), 0, 0.25)).toBeCloseTo(2.568135, 6); }); }); diff --git a/src/__tests__/test.js b/src/__tests__/test.js index a2b8fb6..c18c487 100644 --- a/src/__tests__/test.js +++ b/src/__tests__/test.js @@ -21,13 +21,13 @@ describe('levenberg-marquardt test', () => { initialValues: [3, 3] }; - const { parameterValues, parameterError } = levenbergMarquardt( + const { parameterValues, residuals } = levenbergMarquardt( data, sinFunction, options ); expect(parameterValues).toBeDeepCloseTo([2, 2], 3); - expect(parameterError).toBeCloseTo(0, 2); + expect(residuals).toBeCloseTo(0, 2); }); it('Exceptions', () => { @@ -36,20 +36,20 @@ describe('levenberg-marquardt test', () => { initialValues: [3, 3] }; - expect(() => levenbergMarquardt()).toThrow( + expect(() => levenbergMarquardt({}, sinFunction, { damping: -1 })).toThrow( 'The damping option must be a positive number' ); expect(() => levenbergMarquardt([1, 2], sinFunction, options)).toThrow( - 'The data parameter must have x and y elements' + 'The data object must have x and y elements' ); expect(() => levenbergMarquardt({ x: 1, y: 2 }, sinFunction, options) ).toThrow( - 'The data parameter elements must be an array with more than 2 points' + 'The data must have more than 2 points' ); expect(() => levenbergMarquardt({ x: [1, 2], y: [1, 2, 3] }, sinFunction, options) - ).toThrow('The data parameter elements must have the same size'); + ).toThrow('The data object must have equal number of x and y coordinates'); expect(() => levenbergMarquardt({ x: [1, 2], y: [1, 2] }, sumOfLorentzians, { damping: 0.1, @@ -75,13 +75,13 @@ describe('levenberg-marquardt test', () => { 
maxIterations: 200 }; - let { parameterValues, parameterError } = levenbergMarquardt( + let { parameterValues, residuals } = levenbergMarquardt( data, sigmoidFunction, options ); expect(parameterValues).toBeDeepCloseTo([2, 2, 2], 1); - expect(parameterError).toBeCloseTo(0, 1); + expect(residuals).toBeCloseTo(0, 1); }); it('Sum of lorentzians example', () => { @@ -109,6 +109,45 @@ describe('levenberg-marquardt test', () => { }); }); +it('Legacy', () => { + const len = 20; + let data = { + x: new Array(len), + y: new Array(len) + }; + let sampleFunction = sinFunction([2, 2]); + for (let i = 0; i < len; i++) { + data.x[i] = i; + data.y[i] = sampleFunction(i); + } + const options = { + damping: 0.1, + initialValues: [3, 3] + }; + const options1 = { + ...options, + residualEpsilon: 1 + }; + const options2 = { + ...options, + errorTolerance: 1 + }; + + const result1 = levenbergMarquardt( + data, + sinFunction, + options1 + ); + const result2 = levenbergMarquardt( + data, + sinFunction, + options2 + ); + + expect(result1.parameterValues).toBeDeepCloseTo(result2.parameterValues, 0); + expect(result1.residuals).toBe(result1.parameterError); +}); + function sinFunction([a, b]) { return (t) => a * Math.sin(b * t); } diff --git a/src/errorCalculation.js b/src/errorCalculation.js index 62d14a0..372d64a 100644 --- a/src/errorCalculation.js +++ b/src/errorCalculation.js @@ -1,18 +1,128 @@ +import nd from 'norm-dist'; + +/** + * @private + * @param {number} x + * @return {number} + */ +const sq = (x) => x * x; + +/** + * @private + * @param {...number} n + * @return {boolean} + */ +const isNaN = (...n) => n.some((x) => Number.isNaN(x)); + +/** + * Compute equiprobable ranges in normal distribution + * @private + * @param {number} iterations - How many evaluations (per point per step) of the fitted function to use to approximate the error propagation through it + */ +export function initializeErrorPropagation(iterations) { + if (equiprobableStops.length === iterations) return; + + 
const min = nd.cdf(-2.25); + const max = 1 - min; + const step = (max - min) / (iterations - 1); + + for (var i = 0, x = min; i < iterations; i++, x += step) { + equiprobableStops[i] = nd.icdf(x); + } +} + +/** @type {number[]} */ +const equiprobableStops = []; + +/** + * Estimate error propagation through a function + * @private + * @param {(x: number) => number} fn - The function to approximate, outside of the function domain return `NaN` + * @param {number} x - The error propagation will be approximate in the neighbourhood of this point + * @param {number} xSigma - The standard deviation of `x` + * @return {number} The estimated standard deviation of `fn(x)` + */ +export function errorPropagation(fn, x, xSigma) { + const stopCount = equiprobableStops.length; + + var slope = 0; + var N = 0; + + var xLast = x + xSigma * equiprobableStops[0]; + var yLast = fn(xLast); + + for (var stop = 1; stop < stopCount; stop++) { + var xNew = x + xSigma * equiprobableStops[stop]; + var yNew = fn(xNew); + + if (!isNaN(xNew, yNew, xLast, yLast)) { + slope += (yNew - yLast) / (xNew - xLast); + N++; + } + + xLast = xNew; + yLast = yNew; + } + + const avgSlope = slope / N; + + return Math.abs(avgSlope * xSigma); +} + /** - * Calculate current error + * Approximate errors in point location and calculate point weights + * @ignore + * @param {{x:Array, y:Array, xError:Array|void, yError:Array|void}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... 
] + * @param {Array} params - Array of parameter values + * @param {(n: number[]) => (x: number) => number} paramFunction - Takes the parameters and returns a function with the independent variable as a parameter + * @return {Array} Array of point weights + */ +export function pointWeights(data, params, paramFunction) { + const m = data.x.length; + + /** @type {Array} */ + const errs = new Array(m); + + if (!data.xError) { + if (!data.yError) { + return errs.fill(1); + } else { + errs.splice(0, m, ...data.yError); + } + } else { + const fn = paramFunction(params); + var point; + + for (point = 0; point < m; point++) { + errs[point] = errorPropagation(fn, data.x[point], data.xError[point]); + } + + if (data.yError) { + for (point = 0; point < m; point++) { + errs[point] = Math.sqrt(sq(errs[point]) + sq(data.yError[point])); + } + } + } + + // Point weight is the reciprocal of its error + for (point = 0; point < m; point++) { + errs[point] = 1 / errs[point]; + } + + return errs; +} + +/** + * Calculate the current sum of residuals * @ignore * @param {{x:Array, y:Array}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... 
] * @param {Array} parameters - Array of current parameter values - * @param {function} parameterizedFunction - The parameters and returns a function with the independent variable as a parameter + * @param {(...n: number[]) => (x: number) => number} paramFunction - The parameters and returns a function with the independent variable as a parameter * @return {number} */ -export default function errorCalculation( - data, - parameters, - parameterizedFunction -) { +export function sumOfResiduals(data, parameters, paramFunction) { var error = 0; - const func = parameterizedFunction(parameters); + const func = paramFunction(parameters); for (var i = 0; i < data.x.length; i++) { error += Math.abs(data.y[i] - func(data.x[i])); @@ -20,3 +130,22 @@ export default function errorCalculation( return error; } + +/** + * Calculate the current sum of squares of residuals + * @ignore + * @param {{x:Array, y:Array}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... ] + * @param {Array} parameters - Array of current parameter values + * @param {(...n: number[]) => (x: number) => number} paramFunction - The parameters and returns a function with the independent variable as a parameter + * @return {number} + */ +export function sumOfSquaredResiduals(data, parameters, paramFunction) { + var error = 0; + const func = paramFunction(parameters); + + for (var i = 0; i < data.x.length; i++) { + error += sq(data.y[i] - func(data.x[i])); + } + + return error; +} diff --git a/src/index.js b/src/index.js index a9544b2..fa0e646 100644 --- a/src/index.js +++ b/src/index.js @@ -1,98 +1,140 @@ -import errorCalculation from './errorCalculation'; +import { + initializeErrorPropagation, + sumOfSquaredResiduals +} from './errorCalculation'; import step from './step'; +import * as legacy from './legacy'; + +/** + * @typedef {Object} Result + * @property {Array} parameterValues - The computed values of parameters + * @property {number} residuals - Sum of squared residuals of the 
final fit + * @property {number} iterations - Number of iterations used + */ /** * Curve fitting algorithm - * @param {{x:Array, y:Array}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... ] - * @param {function} parameterizedFunction - The parameters and returns a function with the independent variable as a parameter + * @param {{x:Array, y:Array, xError:Array|void, yError:Array|void}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... ] + * @param {(...n: number[]) => (x: number) => number} paramFunction - Takes the parameters and returns a function with the independent variable as a parameter * @param {object} [options] - Options object - * @param {number} [options.damping] - Levenberg-Marquardt parameter - * @param {number} [options.gradientDifference = 10e-2] - Adjustment for decrease the damping parameter + * @param {number} [options.damping = 0.1] - Levenberg-Marquardt lambda parameter + * @param {number} [options.dampingDrop = 0.1] - The constant used to lower the damping parameter + * @param {number} [options.dampingBoost = 1.5] - The constant used to increase the damping parameter + * @param {number} [options.maxDamping] - Maximum value for the damping parameter + * @param {number} [options.minDamping] - Minimum value for the damping parameter + * @param {number} [options.gradientDifference = 1e-6] - The "infinitesimal" value used to approximate the gradient of the parameter space * @param {Array} [options.minValues] - Minimum allowed values for parameters * @param {Array} [options.maxValues] - Maximum allowed values for parameters * @param {Array} [options.initialValues] - Array of initial parameter values * @param {number} [options.maxIterations = 100] - Maximum of allowed iterations - * @param {number} [options.errorTolerance = 10e-3] - Minimum uncertainty allowed for each point - * @return {{parameterValues: Array, parameterError: number, iterations: number}} + * @param {number} [options.residualEpsilon = 
1e-6] - Minimum change of the sum of residuals per step – if the sum of residuals changes less than this number, the algorithm will stop + * @param {Array} [options.maxValues] - Maximum values for the parameters + * @param {Array} [options.minValues] - Minimum values for the parameters + * @param {number} [options.errorPropagation = 50] - How many evaluations (per point per step) of the fitted function to use to approximate the error propagation through it + * @return {Result} */ -export default function levenbergMarquardt( - data, - parameterizedFunction, - options = {} -) { +export default function levenbergMarquardt(data, paramFunction, options = {}) { let { maxIterations = 100, - gradientDifference = 10e-2, - damping = 0, - errorTolerance = 10e-3, - minValues, + gradientDifference = 1e-6, + damping = 0.1, + dampingDrop = 0.1, + dampingBoost = 1.5, + maxDamping = Number.MAX_SAFE_INTEGER, + minDamping = Number.EPSILON, maxValues, - initialValues - } = options; + minValues, + residualEpsilon = 1e-6, + initialValues, + errorPropagation = 50 + } = legacy.compatOptions(options); if (damping <= 0) { throw new Error('The damping option must be a positive number'); - } else if (!data.x || !data.y) { - throw new Error('The data parameter must have x and y elements'); + } else if (!data || !data.x || !data.y) { + throw new Error('The data object must have x and y elements'); } else if ( !Array.isArray(data.x) || data.x.length < 2 || !Array.isArray(data.y) || data.y.length < 2 ) { + throw new Error('The data must have more than 2 points'); + } else if (data.x.length !== data.y.length) { throw new Error( - 'The data parameter elements must be an array with more than 2 points' + 'The data object must have equal number of x and y coordinates' ); - } else if (data.x.length !== data.y.length) { - throw new Error('The data parameter elements must have the same size'); } - var parameters = - initialValues || new Array(parameterizedFunction.length).fill(1); - let parLen = 
parameters.length; + let params = initialValues || new Array(paramFunction.length).fill(1); + let parLen = params.length; maxValues = maxValues || new Array(parLen).fill(Number.MAX_SAFE_INTEGER); minValues = minValues || new Array(parLen).fill(Number.MIN_SAFE_INTEGER); - if (maxValues.length !== minValues.length) { - throw new Error('minValues and maxValues should have the same size'); + if (!Array.isArray(params)) { + throw new Error('initialValues must be an array'); } - if (!Array.isArray(parameters)) { - throw new Error('initialValues must be an array'); + if ( + maxValues.length !== minValues.length || + maxValues.length !== params.length + ) { + throw new Error( + 'minValues and maxValues must have the same size as the number of parameters' + ); } - var error = errorCalculation(data, parameters, parameterizedFunction); + initializeErrorPropagation(errorPropagation); + + /** @type Array */ + var residualDifferences = Array(10).fill(NaN); - var converged = error <= errorTolerance; + var residuals = sumOfSquaredResiduals(data, params, paramFunction); + var converged = false; for ( var iteration = 0; iteration < maxIterations && !converged; iteration++ ) { - parameters = step( + var params2 = step( data, - parameters, + params, damping, gradientDifference, - parameterizedFunction + paramFunction ); for (let k = 0; k < parLen; k++) { - parameters[k] = Math.min( - Math.max(minValues[k], parameters[k]), - maxValues[k] - ); + params2[k] = Math.min(Math.max(minValues[k], params2[k]), maxValues[k]); } - error = errorCalculation(data, parameters, parameterizedFunction); - if (isNaN(error)) break; - converged = error <= errorTolerance; + var residuals2 = sumOfSquaredResiduals(data, params2, paramFunction); + + if (isNaN(residuals2)) throw new Error('The function evaluates to NaN.'); + var previousResiduals = residuals; + if (residuals2 < residuals) { + params = params2; + residuals = residuals2; + damping *= dampingDrop; + } else { + damping *= dampingBoost; + } + + damping = Math.max(minDamping, Math.min(maxDamping, damping)); + + residualDifferences.shift(); + residualDifferences.push(previousResiduals - residuals2); + converged = + residualDifferences.reduce((a, b) => Math.max(a, b)) <= residualEpsilon; } - return { - parameterValues: parameters, - parameterError: error, + /** @type {Result} */ + let result = { + parameterValues: params, + residuals: sumOfSquaredResiduals(data, params, paramFunction), iterations: iteration }; + + return legacy.compatReturn(result); } diff --git a/src/legacy.js b/src/legacy.js new file mode 100644 index 0000000..e5e57fa --- /dev/null +++ b/src/legacy.js @@ -0,0 +1,59 @@ + +/** + * @typedef {Object} Options + * @prop {number} [damping = 0.1] - Levenberg-Marquardt lambda parameter + * @prop {number} [dampingDrop = 0.1] - The constant used to lower the damping parameter + * @prop {number} [dampingBoost = 1.5] - The constant used to increase the damping parameter + * @prop {number} [maxDamping] - Maximum value for the damping parameter + * @prop {number} [minDamping] - Minimum value for the damping parameter + * @prop {number} [gradientDifference = 1e-6] - The "infinitesimal" value used to approximate the gradient of the parameter space + * @prop {Array} [initialValues] - Array of initial parameter values + * @prop {number} [maxIterations = 100] - Maximum of allowed iterations + * @prop {number} [residualEpsilon = 1e-6] - Minimum change of the sum of residuals per step – if the sum of residuals changes less than this number, the algorithm will stop + * @prop {Array} [maxValues] - Maximum values for the parameters + * @prop {Array} [minValues] - Minimum values for the parameters + * @prop {{rough: number, fine: number}} [errorPropagation] - How many evaluations (per point per step) of the fitted function to use to approximate the error propagation through it + * @prop {number} [errorPropagation.rough = 10] - Number of iterations for rough estimation + * @prop {number} [errorPropagation.fine = 50] - Number of iterations for fine 
estimation + * @return {Result} + */ + +/** + * @typedef {Object} LegacyOptions + * @prop {number} [errorTolerance] - The old name for `residualEpsilon` + */ + +/** + * @param {Options & LegacyOptions} legacy - The options object which can possibly contain deprecated values + * @return {Options} The options object according to the current API + */ +export function compatOptions(legacy) { + if (Object.prototype.hasOwnProperty.call(legacy, 'errorTolerance')) { + legacy.residualEpsilon = legacy.errorTolerance; + } + return legacy; +} + + +/** + * @typedef {Object} Result + * @prop {Array} parameterValues - The computed values of parameters + * @prop {number} residuals - Sum of squared residuals of the final fit + * @prop {number} iterations - Number of iterations used + */ + +/** + * @typedef {Object} LegacyResult + * @property {number} parameterError - The old name for `residuals` + */ + +/** + * @param {Result} modern - The result according to the current API + * @return {Result & LegacyResult} The result object augmented by the backwards-compatibility parameters + */ +export function compatReturn(modern) { + return { + ...modern, + parameterError: modern.residuals + }; +} diff --git a/src/step.js b/src/step.js index b2d53e8..3f59674 100644 --- a/src/step.js +++ b/src/step.js @@ -1,5 +1,7 @@ import { inverse, Matrix } from 'ml-matrix'; +import { pointWeights } from './errorCalculation'; + /** * Difference of the matrix function over the parameters * @ignore @@ -7,7 +9,7 @@ import { inverse, Matrix } from 'ml-matrix'; * @param {Array} evaluatedData - Array of previous evaluated function values * @param {Array} params - Array of previous parameter values * @param {number} gradientDifference - Adjustment for decrease the damping parameter - * @param {function} paramFunction - The parameters and returns a function with the independent variable as a parameter + * @param {(n: number[]) => (x: number) => number} paramFunction - Takes the parameters and returns a function 
with the independent variable as a parameter * @return {Matrix} */ function gradientFunction( @@ -20,6 +22,9 @@ function gradientFunction( const n = params.length; const m = data.x.length; + const weights = pointWeights(data, params, paramFunction); + + /** @type Array> */ var ans = new Array(n); for (var param = 0; param < n; param++) { @@ -29,7 +34,7 @@ function gradientFunction( var funcParam = paramFunction(auxParams); for (var point = 0; point < m; point++) { - ans[param][point] = evaluatedData[point] - funcParam(data.x[point]); + ans[param][point] = (evaluatedData[point] - funcParam(data.x[point])) * weights[point]; } } return new Matrix(ans); @@ -45,6 +50,7 @@ function gradientFunction( function matrixFunction(data, evaluatedData) { const m = data.x.length; + /** @type Array */ var ans = new Array(m); for (var point = 0; point < m; point++) { @@ -57,11 +63,11 @@ function matrixFunction(data, evaluatedData) { /** * Iteration for Levenberg-Marquardt * @ignore - * @param {{x:Array, y:Array}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... 
] + * @param {{x:Array, y:Array, xError:Array|void, yError:Array|void}} data * @param {Array} params - Array of previous parameter values * @param {number} damping - Levenberg-Marquardt parameter * @param {number} gradientDifference - Adjustment for decrease the damping parameter - * @param {function} parameterizedFunction - The parameters and returns a function with the independent variable as a parameter + * @param {(n: number[]) => (x: number) => number} paramFunction - Takes the parameters and returns a function with the independent variable as a parameter * @return {Array} */ export default function step( @@ -69,34 +75,34 @@ export default function step( params, damping, gradientDifference, - parameterizedFunction + paramFunction ) { - var value = damping * gradientDifference * gradientDifference; - var identity = Matrix.eye(params.length, params.length, value); + var scaledDamping = damping * gradientDifference * gradientDifference; + var identity = Matrix.eye(params.length, params.length, scaledDamping); - const func = parameterizedFunction(params); + const func = paramFunction(params); var evaluatedData = data.x.map((e) => func(e)); - var gradientFunc = gradientFunction( + var gradient = gradientFunction( data, evaluatedData, params, gradientDifference, - parameterizedFunction + paramFunction ); - var matrixFunc = matrixFunction(data, evaluatedData); + var residuals = matrixFunction(data, evaluatedData); var inverseMatrix = inverse( - identity.add(gradientFunc.mmul(gradientFunc.transpose())) + identity.add(gradient.mmul(gradient.transpose())) ); - params = new Matrix([params]); - params = params.sub( + let params2 = new Matrix([params]); + params2 = params2.sub( inverseMatrix - .mmul(gradientFunc) - .mmul(matrixFunc) + .mmul(gradient) + .mmul(residuals) .mul(gradientDifference) .transpose() ); - return params.to1DArray(); + return params2.to1DArray(); }