Skip to content

Commit

Permalink
fix(nelder-mead): argsort before early exit (#984)
Browse files — Browse the repository at this point in the history
  • Loading branch information
jmoralez authored Feb 11, 2025
1 parent 52a5c9a commit 788d170
Show file tree
Hide file tree
Showing 7 changed files with 37 additions and 36 deletions.
23 changes: 12 additions & 11 deletions include/statsforecast/nelder_mead.h
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ namespace nm {
using Eigen::VectorXd;
using RowMajorMatrixXd =
Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
using OptimResult = std::tuple<VectorXd, double, int, RowMajorMatrixXd>;

inline auto Clamp(const VectorXd &x, const VectorXd &lower,
const VectorXd &upper) {
Expand All @@ -26,11 +27,11 @@ inline Eigen::VectorX<Eigen::Index> ArgSort(const VectorXd &v) {
}

template <typename Func, typename... Args>
std::tuple<VectorXd, double, int>
NelderMead(Func F, const VectorXd &x, const VectorXd &lower,
const VectorXd upper, double init_step, double zero_pert,
double alpha, double gamma, double rho, double sigma, int max_iter,
double tol_std, bool adaptive, Args &&...args) {
OptimResult NelderMead(Func F, const VectorXd &x, const VectorXd &lower,
const VectorXd upper, double init_step, double zero_pert,
double alpha, double gamma, double rho, double sigma,
int max_iter, double tol_std, bool adaptive,
Args &&...args) {
auto x0 = Clamp(x, lower, upper);
auto n = x0.size();
if (adaptive) {
Expand All @@ -57,17 +58,17 @@ NelderMead(Func F, const VectorXd &x, const VectorXd &lower,
int i;
Eigen::Index best_idx = 0;
for (i = 0; i < max_iter; ++i) {
// check whether method should stop
if (StandardDeviation(f_simplex) < tol_std) {
break;
}

// Step1: order of f_simplex
Eigen::VectorX<Eigen::Index> order_f = ArgSort(f_simplex);
best_idx = order_f(0);
auto worst_idx = order_f(n);
auto second_worst_idx = order_f(n - 1);

// check whether method should stop
if (StandardDeviation(f_simplex) < tol_std) {
break;
}

// calculate centroid as the col means removing the row with the max fval
VectorXd x_o = (simplex.colwise().sum() - simplex.row(worst_idx)) / n;

Expand Down Expand Up @@ -131,6 +132,6 @@ NelderMead(Func F, const VectorXd &x, const VectorXd &lower,
f_simplex(j) = F(simplex.row(j), std::forward<Args>(args)...);
}
}
return {simplex.row(best_idx), f_simplex(best_idx), i + 1};
return {simplex.row(best_idx), f_simplex(best_idx), i + 1, simplex};
}
} // namespace nm
2 changes: 1 addition & 1 deletion nbs/src/ets.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -755,7 +755,7 @@
" 1_000,\n",
" True,\n",
" )\n",
" return results(*opt_res, None)"
" return results(*opt_res)"
]
},
{
Expand Down
2 changes: 1 addition & 1 deletion nbs/src/theta.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -479,7 +479,7 @@
" modeltype,\n",
" nmse\n",
" )\n",
" return results(*opt_res, None) # type: ignore"
" return results(*opt_res)"
]
},
{
Expand Down
2 changes: 1 addition & 1 deletion python/statsforecast/ets.py
Original file line number Diff line number Diff line change
Expand Up @@ -501,7 +501,7 @@ def optimize_ets_target_fn(
1_000,
True,
)
return results(*opt_res, None)
return results(*opt_res)

# %% ../../nbs/src/ets.ipynb 26
def etsmodel(
Expand Down
2 changes: 1 addition & 1 deletion python/statsforecast/theta.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ def optimize_theta_target_fn(
modeltype,
nmse,
)
return results(*opt_res, None) # type: ignore
return results(*opt_res)

# %% ../../nbs/src/theta.ipynb 14
def thetamodel(
Expand Down
18 changes: 9 additions & 9 deletions src/ets.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -279,15 +279,15 @@ double ObjectiveFunction(const VectorXd &params, const VectorXd &y, int n_state,
}
return obj_val;
}
std::tuple<VectorXd, double, int>
Optimize(const Eigen::Ref<const VectorXd> &x0,
const Eigen::Ref<const VectorXd> &y, int n_state, Component error,
Component trend, Component season, Criterion opt_crit, int n_mse,
int m, bool opt_alpha, bool opt_beta, bool opt_gamma, bool opt_phi,
double alpha, double beta, double gamma, double phi,
const Eigen::Ref<const VectorXd> &lower,
const Eigen::Ref<const VectorXd> &upper, double tol_std, int max_iter,
bool adaptive) {
nm::OptimResult Optimize(const Eigen::Ref<const VectorXd> &x0,
const Eigen::Ref<const VectorXd> &y, int n_state,
Component error, Component trend, Component season,
Criterion opt_crit, int n_mse, int m, bool opt_alpha,
bool opt_beta, bool opt_gamma, bool opt_phi,
double alpha, double beta, double gamma, double phi,
const Eigen::Ref<const VectorXd> &lower,
const Eigen::Ref<const VectorXd> &upper,
double tol_std, int max_iter, bool adaptive) {
double init_step = 0.05;
double nm_alpha = 1.0;
double nm_gamma = 2.0;
Expand Down
24 changes: 12 additions & 12 deletions src/theta.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -120,10 +120,10 @@ pegels_resid(const Eigen::Ref<const VectorXd> &y, ModelType model_type,
return {amse, e, states, mse};
}

double theta_target_fn(const VectorXd &params, double init_level,
double init_alpha, double init_theta, bool opt_level,
bool opt_alpha, bool opt_theta, const VectorXd &y,
ModelType model_type, size_t nmse) {
double target_fn(const VectorXd &params, double init_level, double init_alpha,
double init_theta, bool opt_level, bool opt_alpha,
bool opt_theta, const VectorXd &y, ModelType model_type,
size_t nmse) {
RowMajorMatrixXd states = RowMajorMatrixXd::Zero(y.size(), 5);
size_t j = 0;
double level, alpha, theta;
Expand Down Expand Up @@ -152,13 +152,13 @@ double theta_target_fn(const VectorXd &params, double init_level,
return mse;
}

std::tuple<VectorXd, double, int>
optimize(const Eigen::Ref<const VectorXd> &x0,
const Eigen::Ref<const VectorXd> &lower,
const Eigen::Ref<const VectorXd> &upper, double init_level,
double init_alpha, double init_theta, bool opt_level, bool opt_alpha,
bool opt_theta, const Eigen::Ref<const VectorXd> &y,
ModelType model_type, size_t nmse) {
nm::OptimResult optimize(const Eigen::Ref<const VectorXd> &x0,
const Eigen::Ref<const VectorXd> &lower,
const Eigen::Ref<const VectorXd> &upper,
double init_level, double init_alpha,
double init_theta, bool opt_level, bool opt_alpha,
bool opt_theta, const Eigen::Ref<const VectorXd> &y,
ModelType model_type, size_t nmse) {
double init_step = 0.05;
double zero_pert = 1e-4;
double alpha = 1.0;
Expand All @@ -168,7 +168,7 @@ optimize(const Eigen::Ref<const VectorXd> &x0,
int max_iter = 1'000;
double tol_std = 1e-4;
bool adaptive = true;
return nm::NelderMead(theta_target_fn, x0, lower, upper, init_step, zero_pert,
return nm::NelderMead(target_fn, x0, lower, upper, init_step, zero_pert,
alpha, gamma, rho, sigma, max_iter, tol_std, adaptive,
init_level, init_alpha, init_theta, opt_level,
opt_alpha, opt_theta, y, model_type, nmse);
Expand Down

0 comments on commit 788d170

Please sign in to comment.