36 void CLibLinearRegression::init_defaults()
45 void CLibLinearRegression::register_parameters()
72 if (num_vec!=num_train_labels)
74 SG_ERROR(
"number of vectors %d does not match "
75 "number of training labels %d\n",
76 num_vec, num_train_labels);
84 liblinear_problem prob;
103 double* Cs = SG_MALLOC(
double, prob.l);
104 for(
int i = 0; i < prob.l; i++)
116 solve_l2r_l1l2_svr(&prob);
119 solve_l2r_l1l2_svr(&prob);
122 SG_ERROR(
"Error: unknown regression type\n")
157 void CLibLinearRegression::solve_l2r_l1l2_svr(
const liblinear_problem *prob)
162 int w_size = prob->n;
167 int *index =
new int[l];
171 double Gmax_new, Gnorm1_new;
172 double Gnorm1_init = 0.0;
173 double *beta =
new double[l];
174 double *QD =
new double[l];
178 double lambda[1], upper_bound[1];
193 for(i=0; i<w_size; i++)
197 QD[i] = prob->x->
dot(i, prob->x,i);
198 prob->x->add_to_dense_vec(beta[i], i,
w.
vector, w_size);
207 while(iter < max_iter)
212 for(i=0; i<active_size; i++)
218 for(s=0; s<active_size; s++)
221 G = -y[i] + lambda[
GETI(i)]*beta[i];
222 H = QD[i] + lambda[
GETI(i)];
224 G += prob->x->dense_dot(i,
w.
vector, w_size);
230 double violation = 0;
237 else if(Gp>Gmax_old && Gn<-Gmax_old)
245 else if(beta[i] >= upper_bound[
GETI(i)])
249 else if(Gp < -Gmax_old)
257 else if(beta[i] <= -upper_bound[
GETI(i)])
261 else if(Gn > Gmax_old)
270 violation = fabs(Gp);
272 violation = fabs(Gn);
275 Gnorm1_new += violation;
280 else if(Gn > H*beta[i])
285 if(fabs(d) < 1.0e-12)
288 double beta_old = beta[i];
290 d = beta[i]-beta_old;
294 prob->x->add_to_dense_vec(d, i,
w.
vector, w_size);
302 Gnorm1_init = Gnorm1_new;
307 if(Gnorm1_new <= eps*Gnorm1_init)
323 SG_INFO(
"\noptimization finished, #iter = %d\n", iter)
325 SG_INFO(
"\nWARNING: reaching max number of iterations\nUsing -s 11 may be faster\n\n")
330 for(i=0; i<w_size; i++)
335 v += p*fabs(beta[i]) - y[i]*beta[i] + 0.5*lambda[
GETI(i)]*beta[i]*beta[i];
340 SG_INFO(
"Objective value = %lf\n", v)
void set_max_iter(int32_t max_iter)
virtual bool train_machine(CFeatures *data=NULL)
virtual ELabelType get_label_type() const =0
The class Labels models labels, i.e. class assignments of objects.
static const float64_t INFTY
infinity
virtual int32_t get_num_labels() const =0
real-valued labels (e.g. for regression, classifier outputs)
static float64_t log10(float64_t v)
tanh(x), x being a complex128_t
L2 regularized support vector regression with L1 epsilon tube loss.
virtual int32_t get_num_vectors() const =0
void tron(float64_t *w, float64_t max_train_time)
float64_t m_max_train_time
void set_liblinear_regression_type(LIBLINEAR_REGRESSION_TYPE st)
Features that support dot products among other operations.
virtual int32_t get_dim_feature_space() const =0
L2 regularized support vector regression with L2 epsilon tube loss.
L2 regularized support vector regression with L2 epsilon tube loss (dual)
static void clear_cancel()
virtual void set_features(CDotFeatures *feat)
static T max(T a, T b)
return the maximum of two values
Class LinearMachine is a generic interface for all kinds of linear machines like classifiers.
LIBLINEAR_REGRESSION_TYPE m_liblinear_regression_type
static float64_t dot(const bool *v1, const bool *v2, int32_t n)
compute dot product between v1 and v2 (blas optimized)
all classes and functions are contained in the shogun namespace
The class Features is the base class of all feature objects.
static T min(T a, T b)
return the minimum of two values
void set_use_bias(bool use_bias)
static void swap(T &a, T &b)
swap two values a and b (e.g. floats)
virtual ~CLibLinearRegression()
virtual void set_labels(CLabels *lab)
#define SG_SABS_PROGRESS(...)
void set_epsilon(float64_t epsilon)