24 #ifndef DOXYGEN_SHOULD_SKIP_THIS
25 struct GRADIENT_THREAD_PARAM
61 void CInferenceMethod::init()
80 int32_t num_importance_samples,
float64_t ridge_size)
113 scaled_kernel(i,i)+=ridge_size;
126 ASSERT(log_likelihood.
vlen==num_importance_samples);
127 ASSERT(log_likelihood.
vlen==log_pdf_prior.vlen);
131 sum[i]=log_likelihood[i]+log_pdf_prior[i]-log_pdf_post_approx[i];
162 for (
index_t i=0; i<num_deriv; i++)
166 GRADIENT_THREAD_PARAM thread_params;
168 thread_params.inf=
this;
169 thread_params.obj=node->data;
170 thread_params.param=node->key;
171 thread_params.grad=result;
172 thread_params.lock=&lock;
180 pthread_t* threads=SG_MALLOC(pthread_t, num_deriv);
181 GRADIENT_THREAD_PARAM* thread_params=SG_MALLOC(GRADIENT_THREAD_PARAM,
184 for (
index_t t=0; t<num_deriv; t++)
188 thread_params[t].inf=
this;
189 thread_params[t].obj=node->data;
190 thread_params[t].param=node->key;
191 thread_params[t].grad=result;
192 thread_params[t].lock=&lock;
195 (
void*)&thread_params[t]);
198 for (
index_t t=0; t<num_deriv; t++)
199 pthread_join(threads[t], NULL);
201 SG_FREE(thread_params);
211 GRADIENT_THREAD_PARAM* thread_param=(GRADIENT_THREAD_PARAM*)p;
217 CLock* lock=thread_param->lock;
219 REQUIRE(param,
"Parameter should not be NULL\n");
220 REQUIRE(obj,
"Object of the parameter should not be NULL\n");
239 else if (obj==inf->
m_mean)
246 SG_SERROR(
"Can't compute derivative of negative log marginal "
251 grad->
add(param, gradient);
267 "Number of training features must be greater than zero\n")
270 "Number of labels must be greater than zero\n")
272 "Number of training vectors must match number of labels, which is "
273 "%d, but number of training vectors is %d\n",
virtual void set_labels(CLabels *lab)
virtual const char * get_name() const =0
virtual void set_model(CLikelihoodModel *mod)
virtual bool init(CFeatures *lhs, CFeatures *rhs)
The Inference Method base class.
virtual void set_features(CFeatures *feat)
The class Labels models labels, i.e. class assignments of objects.
virtual int32_t get_num_labels() const =0
virtual ~CInferenceMethod()
CMapNode< K, T > * get_node_ptr(int32_t index)
virtual int32_t get_num_vectors() const =0
virtual SGVector< float64_t > get_mean_vector(const CFeatures *features) const =0
int32_t get_num_elements() const
An abstract class of the mean function.
virtual SGVector< float64_t > get_derivative_wrt_likelihood_model(const TParameter *param)=0
SGMatrix< float64_t > get_kernel_matrix()
virtual bool update_parameter_hash()
virtual SGVector< float64_t > get_log_probability_fmatrix(const CLabels *lab, SGMatrix< float64_t > F) const
Class SGObject is the base class of all shogun objects.
Class Lock used for synchronization in concurrent programs.
The class CMap, a map based on a hash table. See: http://en.wikipedia.org/wiki/Hash_table
virtual SGVector< float64_t > get_derivative_wrt_inference_method(const TParameter *param)=0
virtual void update_train_kernel()
virtual SGVector< float64_t > get_derivative_wrt_kernel(const TParameter *param)=0
virtual SGVector< float64_t > log_pdf_multiple(SGMatrix< float64_t > samples) const
virtual void set_kernel(CKernel *kern)
float64_t get_marginal_likelihood_estimate(int32_t num_importance_samples=1, float64_t ridge_size=1e-15)
All classes and functions are contained in the shogun namespace.
Dense version of the well-known Gaussian probability distribution, defined as \( \mathcal{N}(x;\mu,\Sigma) = (2\pi)^{-d/2}\,|\Sigma|^{-1/2}\,\exp\!\left(-\tfrac{1}{2}(x-\mu)^{\top}\Sigma^{-1}(x-\mu)\right) \).
virtual SGVector< float64_t > get_derivative_wrt_mean(const TParameter *param)=0
int32_t add(const K &key, const T &data)
virtual void set_mean(CMeanFunction *m)
virtual SGMatrix< float64_t > get_posterior_covariance()=0
The class Features is the base class of all feature objects.
virtual void check_members() const
virtual SGVector< float64_t > get_posterior_mean()=0
static T log_mean_exp(SGVector< T > values)
The Likelihood model base class.
SGMatrix< float64_t > m_ktrtr
virtual CMap< TParameter *, SGVector< float64_t > > * get_negative_log_marginal_likelihood_derivatives(CMap< TParameter *, CSGObject * > *parameters)
CLikelihoodModel * m_model
virtual SGMatrix< float64_t > sample(int32_t num_samples, SGMatrix< float64_t > pre_samples=SGMatrix< float64_t >()) const
static void * get_derivative_helper(void *p)