iqbal: Improve convergence of the optimization

- We only take the general direction of the gradient
   and not its actual magnitude (hence the division by the
   sum of absolute values)

 - We accept new values even if the score is equal; this
   might get us out of a deadlock

 - When reducing the step size, we look at how much we overshot
   to compute a better new step size

 - If the gain was lower than 1%, we stop iterating, but we
   still keep the new value

Signed-off-by: Sylvain Munaut <tnt@246tNt.com>
This commit is contained in:
Sylvain Munaut 2013-03-08 17:50:21 +01:00
parent 2e8cef8f61
commit 42a684e0bf
1 changed files with 8 additions and 7 deletions

View File

@ -319,26 +319,27 @@ osmo_iqbal_cxvec_optimize(const struct osmo_cxvec *sig, float *mag, float *phase
}
cv = _iqbal_objfn_val_gradient(state, cx, cgrad);
step = 0.1f * cv / (fabs(cgrad[0]) + fabs(cgrad[1]));
step = cv / (fabs(cgrad[0]) + fabs(cgrad[1]));
for (i=0; i<opts->max_iter; i++)
{
nx[0] = cx[0] - step * cgrad[0];
nx[1] = cx[1] - step * cgrad[1];
nx[0] = cx[0] - step * (cgrad[0] / (fabs(cgrad[0]) + fabs(cgrad[1])));
nx[1] = cx[1] - step * (cgrad[1] / (fabs(cgrad[0]) + fabs(cgrad[1])));
nv = _iqbal_objfn_value(state, nx);
if (nv < cv) {
if (nv <= cv) {
p = (cv - nv) / cv;
if (p < 0.01f)
break;
cx[0] = nx[0];
cx[1] = nx[1];
cv = nv;
_iqbal_objfn_gradient(state, cx, cv, cgrad);
if (p < 0.01f)
break;
} else {
step /= 2.0f;
step /= 2.0 * (nv / cv);
}
}