#5031, Kernel constructor cleanup #5036

Merged: 1 commit, May 17, 2020
51 changes: 16 additions & 35 deletions src/shogun/kernel/ANOVAKernel.cpp
@@ -7,26 +7,27 @@

#include <shogun/mathematics/Math.h>
#include <shogun/kernel/ANOVAKernel.h>
#include <shogun/mathematics/Math.h>

using namespace shogun;

ANOVAKernel::ANOVAKernel(): DotKernel(0), cardinality(1.0)
ANOVAKernel::ANOVAKernel(): DotKernel(0)
{
register_params();
SG_ADD(
&m_cardinality, "cardinality", "Kernel cardinality.",
ParameterProperties::HYPER);
}

ANOVAKernel::ANOVAKernel(int32_t cache, int32_t d)
: DotKernel(cache), cardinality(d)
ANOVAKernel::ANOVAKernel(int32_t size, int32_t d)
: ANOVAKernel()
{
register_params();
set_cache_size(size);
m_cardinality = d;
}

ANOVAKernel::ANOVAKernel(
const std::shared_ptr<DenseFeatures<float64_t>>& l, const std::shared_ptr<DenseFeatures<float64_t>>& r, int32_t d, int32_t cache)
: DotKernel(cache), cardinality(d)
: ANOVAKernel(cache, d)
{
register_params();
init(l, r);
}

@@ -88,20 +89,14 @@ float64_t ANOVAKernel::compute_rec2(int32_t idx_a, int32_t idx_b)
return result;
}

void ANOVAKernel::register_params()
{
SG_ADD(&cardinality, "cardinality", "Kernel cardinality.", ParameterProperties::HYPER);
}


float64_t ANOVAKernel::compute_recursive1(float64_t* avec, float64_t* bvec, int32_t len)
{
int32_t DP_len=(cardinality+1)*(len+1);
int32_t DP_len=(m_cardinality+1)*(len+1);
float64_t* DP = SG_MALLOC(float64_t, DP_len);

ASSERT(DP)
int32_t d=cardinality;
int32_t offs=cardinality+1;
int32_t d=m_cardinality;
int32_t offs=m_cardinality+1;

ASSERT(DP_len==(len+1)*offs)

@@ -128,15 +123,15 @@ float64_t ANOVAKernel::compute_recursive1(float64_t* avec, float64_t* bvec, int32_t len)

float64_t ANOVAKernel::compute_recursive2(float64_t* avec, float64_t* bvec, int32_t len)
{
float64_t* KD = SG_MALLOC(float64_t, cardinality+1);
float64_t* KS = SG_MALLOC(float64_t, cardinality+1);
float64_t* KD = SG_MALLOC(float64_t, m_cardinality+1);
float64_t* KS = SG_MALLOC(float64_t, m_cardinality+1);
float64_t* vec_pow = SG_MALLOC(float64_t, len);

ASSERT(vec_pow)
ASSERT(KS)
ASSERT(KD)

int32_t d=cardinality;
int32_t d=m_cardinality;
for (int32_t i=0; i < len; i++)
vec_pow[i] = 1;

@@ -171,18 +166,4 @@ float64_t ANOVAKernel::compute_recursive2(float64_t* avec, float64_t* bvec, int32_t len)
SG_FREE(KD);

return result;
}

std::shared_ptr<ANOVAKernel> ANOVAKernel::obtain_from_generic(const std::shared_ptr<Kernel>& kernel)
{
if (!kernel)
return NULL;

require(kernel->get_kernel_type()==K_ANOVA, "Provided kernel is "
"not of type CANOVAKernel, but type {}!",
kernel->get_kernel_type());

/* since an additional reference is returned */

return std::static_pointer_cast<ANOVAKernel>(kernel);
}
}
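
The pattern this commit applies across the kernels is C++11 constructor delegation: the default constructor becomes the single owner of parameter registration (SG_ADD), and every other constructor forwards to it before setting its own state, so the per-constructor register_params() call, and the chance to forget it, disappears. A minimal compilable sketch of the shape, assuming nothing about Shogun beyond what the diff shows (DotKernel here is a stand-in):

    #include <stdint.h>

    // Stand-in for shogun::DotKernel; only the pieces the sketch needs.
    struct DotKernel
    {
        explicit DotKernel(int32_t size) : m_cache_size(size) {}
        void set_cache_size(int32_t size) { m_cache_size = size; }
        int32_t m_cache_size;
    };

    class ANOVAKernel : public DotKernel
    {
    public:
        // Parameters are registered exactly once, here (SG_ADD in the
        // real code).
        ANOVAKernel() : DotKernel(0) {}

        // Every other constructor delegates first, then fills in extras.
        ANOVAKernel(int32_t size, int32_t d) : ANOVAKernel()
        {
            set_cache_size(size);
            m_cardinality = d;
        }

        int32_t m_cardinality = 1;
    };

    int main()
    {
        ANOVAKernel k(10, 3);
        return (k.m_cache_size == 10 && k.m_cardinality == 3) ? 0 : 1;
    }
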
47 changes: 7 additions & 40 deletions src/shogun/kernel/ANOVAKernel.h
@@ -34,7 +34,7 @@ class Distance;
class ANOVAKernel: public DotKernel
{
public:
/** default constructor */

ANOVAKernel();

/** constructor
@@ -54,42 +54,25 @@

virtual ~ANOVAKernel();

/** initialize kernel with features
* @param l features left-side
* @param r features right-side
* @return true if successful
*/
virtual bool init(std::shared_ptr<Features> l, std::shared_ptr<Features> r);

/**
* @return kernel type
*/
virtual EKernelType get_kernel_type() { return K_ANOVA; }

/**
* @return type of features
*/
virtual EFeatureType get_feature_type() { return F_DREAL; }

/**
* @return class of features
*/
virtual EFeatureClass get_feature_class() { return C_DENSE; }

/**
* @return name of kernel
*/
virtual const char* get_name() const { return "ANOVAKernel"; }

/** getter for degree parameter
* @return kernel parameter cardinality
*/
inline int32_t get_cardinality() { return this->cardinality; }
int32_t get_cardinality() { return this->m_cardinality; }

/** setter for degree parameter
* @param value kernel parameter cardinality
*/
inline void set_cardinality(int32_t value) { this->cardinality = value; }
void set_cardinality(int32_t value) { this->m_cardinality = value; }

/** compute rec 1
* @param idx_a
@@ -105,32 +88,16 @@
*/
float64_t compute_rec2(int32_t idx_a, int32_t idx_b);

/** Casts the given kernel to CANOVAKernel.
* @param kernel Kernel to cast. Must be CANOVAKernel. Might be NULL
* @return casted CANOVAKernel object, NULL if input was NULL
*/
static std::shared_ptr<ANOVAKernel> obtain_from_generic(const std::shared_ptr<Kernel>& kernel);
protected:

/**
* compute kernel for specific feature vectors
* corresponding to [idx_a] of left-side and [idx_b] of right-side
* @param idx_a left-side index
* @param idx_b right-side index
* @return kernel value
*/
virtual float64_t compute(int32_t idx_a, int32_t idx_b);

/** register params */
void register_params();

private:
float64_t compute_recursive1(float64_t* avec, float64_t* bvec, int32_t len);
float64_t compute_recursive2(float64_t* avec, float64_t* bvec, int32_t len);

protected:
virtual float64_t compute(int32_t idx_a, int32_t idx_b);
Member:
minor, but might be worth leaving compute where it was? So that the class members are separate from the class functions?

Contributor Author:
I see your point

protected:
/// degree parameter of kernel
int32_t cardinality;
int32_t m_cardinality = 1;
};
}

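
The header-side counterpart is the C++11 default member initializer: int32_t m_cardinality = 1; covers every constructor that does not set the member explicitly, replacing one init-list entry per constructor (and quietly fixing the old cardinality(1.0), a floating-point literal initializing an int32_t). A tiny sketch of the behavior, using a hypothetical Example struct:

    #include <assert.h>
    #include <stdint.h>

    struct Example
    {
        Example() = default;                         // m_cardinality stays 1
        explicit Example(int32_t d) : m_cardinality(d) {}
        int32_t m_cardinality = 1;  // plays the role of cardinality(1.0)
    };

    int main()
    {
        assert(Example().m_cardinality == 1);
        assert(Example(4).m_cardinality == 4);
    }
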
37 changes: 17 additions & 20 deletions src/shogun/kernel/AUCKernel.cpp
@@ -15,23 +15,20 @@

using namespace shogun;

void AUCKernel::init()
AUCKernel::AUCKernel() : DotKernel(0)
{
SG_ADD(
&subkernel, "subkernel", "The subkernel.", ParameterProperties::HYPER);
SG_ADD(&labels, "labels", "The labels.");
&m_subkernel, "subkernel", "The subkernel.", ParameterProperties::HYPER);
SG_ADD(&m_labels, "labels", "The labels.");
watch_method("setup_auc_maximization", &AUCKernel::setup_auc_maximization);
}

AUCKernel::AUCKernel() : DotKernel(0), subkernel(nullptr), labels(nullptr)
{
init();
}

AUCKernel::AUCKernel(int32_t size, std::shared_ptr<Kernel> s, std::shared_ptr<Labels> l)
: DotKernel(size), subkernel(std::move(s)), labels(std::move(l))
: AUCKernel()
{
init();
set_cache_size(size);
m_subkernel = std::move(s);
m_labels = std::move(l);
}

AUCKernel::~AUCKernel()
@@ -42,14 +39,14 @@ AUCKernel::~AUCKernel()
bool AUCKernel::setup_auc_maximization()
{
io::info("setting up AUC maximization");
ASSERT(labels)
ASSERT(labels->get_label_type() == LT_BINARY)
labels->ensure_valid();
ASSERT(m_labels)
ASSERT(m_labels->get_label_type() == LT_BINARY)
m_labels->ensure_valid();

// get the original labels
SGVector<int32_t> int_labels = binary_labels(labels)->get_int_labels();
SGVector<int32_t> int_labels = binary_labels(m_labels)->get_int_labels();

ASSERT(subkernel->get_num_vec_rhs() == int_labels.vlen)
ASSERT(m_subkernel->get_num_vec_rhs() == int_labels.vlen)

// count positive and negative
int32_t num_pos = 0;
@@ -135,16 +132,16 @@ float64_t AUCKernel::compute(int32_t idx_a, int32_t idx_b)
ASSERT(alen == 2)
ASSERT(blen == 2)

ASSERT(subkernel && subkernel->has_features())
ASSERT(m_subkernel && m_subkernel->has_features())

float64_t k11, k12, k21, k22;
int32_t idx_a1 = avec[0], idx_a2 = avec[1], idx_b1 = bvec[0],
idx_b2 = bvec[1];

k11 = subkernel->kernel(idx_a1, idx_b1);
k12 = subkernel->kernel(idx_a1, idx_b2);
k21 = subkernel->kernel(idx_a2, idx_b1);
k22 = subkernel->kernel(idx_a2, idx_b2);
k11 = m_subkernel->kernel(idx_a1, idx_b1);
k12 = m_subkernel->kernel(idx_a1, idx_b2);
k21 = m_subkernel->kernel(idx_a2, idx_b1);
k22 = m_subkernel->kernel(idx_a2, idx_b2);

float64_t result = k11 + k22 - k21 - k12;
lhs->as<DenseFeatures<uint16_t>>()->free_feature_vector(avec, idx_a, afree);
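
AUCKernel gets the same delegation treatment, with one extra wrinkle: the shared_ptr parameters are taken by value and moved into the members after delegating, so ownership transfers without an additional refcount bump. A minimal sketch, with Kernel and Labels as empty stand-ins for the Shogun types:

    #include <memory>
    #include <utility>

    struct Kernel {};  // stand-in for shogun::Kernel
    struct Labels {};  // stand-in for shogun::Labels

    class AUCKernelSketch
    {
    public:
        AUCKernelSketch() = default;  // SG_ADD registration would live here

        AUCKernelSketch(std::shared_ptr<Kernel> s, std::shared_ptr<Labels> l)
            : AUCKernelSketch()
        {
            // std::move transfers ownership without touching the atomic
            // refcount; the caller already paid for the by-value copies.
            m_subkernel = std::move(s);
            m_labels = std::move(l);
        }

    private:
        std::shared_ptr<Kernel> m_subkernel;
        std::shared_ptr<Labels> m_labels;
    };

    int main()
    {
        AUCKernelSketch k(std::make_shared<Kernel>(), std::make_shared<Labels>());
        (void)k;
    }

The cost relative to the old init-list form is one default-construction-plus-assignment per member, which is noise for a shared_ptr; in exchange, parameter registration can never be skipped.
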
63 changes: 8 additions & 55 deletions src/shogun/kernel/AUCKernel.h
@@ -31,9 +31,8 @@ namespace shogun
*/
class AUCKernel : public DotKernel
{
void init();
public:
/** default constructor */

AUCKernel();

/** constructor
@@ -44,7 +43,6 @@
*/
AUCKernel(int32_t size, std::shared_ptr<Kernel> subkernel, std::shared_ptr<Labels> labels);

/** destructor */
virtual ~AUCKernel();

/** initialize kernel based on current labeling and subkernel
@@ -54,66 +52,21 @@
*/
bool setup_auc_maximization();

/** initialize kernel
*
* @param l features of left-hand side
* @param r features of right-hand side
* @return if initializing was successful
*/
virtual EKernelType get_kernel_type() { return K_AUC; }
virtual bool init(std::shared_ptr<Features> l, std::shared_ptr<Features> r);

/** return what type of kernel we are
*
* @return kernel type AUC
*/
virtual EKernelType get_kernel_type()
{
return K_AUC;
}

/** return the kernel's name
*
* @return name AUC
*/
virtual const char* get_name() const
{
return "AUCKernel";
}

/** return feature class the kernel can deal with
*
* @return feature class SIMPLE
*/
virtual EFeatureClass get_feature_class()
{
return C_DENSE;
}
virtual const char* get_name() const { return "AUCKernel"; }

/** return feature type the kernel can deal with
*
* @return word feature type
*/
virtual EFeatureType get_feature_type()
{
return F_WORD;
}
virtual EFeatureClass get_feature_class() { return C_DENSE; }

virtual EFeatureType get_feature_type() { return F_WORD; }

protected:
/** compute kernel function for features a and b
* idx_{a,b} denote the index of the feature vectors
* in the corresponding feature object
*
* @param idx_a index a
* @param idx_b index b
* @return computed kernel function at indices a,b
*/
virtual float64_t compute(int32_t idx_a, int32_t idx_b);

protected:
/** the subkernel */
std::shared_ptr<Kernel> subkernel;
/** the labels */
std::shared_ptr<Labels> labels;
std::shared_ptr<Kernel> m_subkernel;
std::shared_ptr<Labels> m_labels;
};
} // namespace shogun
#endif /* _AUCKERNEL_H__ */
21 changes: 7 additions & 14 deletions src/shogun/kernel/ConstKernel.cpp
@@ -17,22 +17,21 @@ using namespace shogun;
ConstKernel::ConstKernel()
: Kernel()
{
init();
SG_ADD(&m_const_val, "const_value", "Value for kernel elements.",
ParameterProperties::HYPER);
}

ConstKernel::ConstKernel(float64_t c)
: Kernel()
: ConstKernel()
{
init();
const_value=c;
m_const_val = c;
}

ConstKernel::ConstKernel(std::shared_ptr<Features> l, std::shared_ptr<Features> r, float64_t c)
: Kernel()
: ConstKernel(c)
{
init();
const_value=c;
init(std::move(l), std::move(r));
Kernel::init(l, r);
ASSERT(init_normalizer());
}

ConstKernel::~ConstKernel()
@@ -45,9 +44,3 @@ bool ConstKernel::init(std::shared_ptr<Features> l, std::shared_ptr<Features> r)
return init_normalizer();
}

void ConstKernel::init()
{
const_value=1.0;
SG_ADD(&const_value, "const_value", "Value for kernel elements.",
ParameterProperties::HYPER);
}
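
ConstKernel shows the end state most compactly: the init() helper is gone, the 1.0 default presumably moves onto the member as a default initializer (the header hunk is not shown here), and the feature-taking constructor calls Kernel::init(l, r) explicitly instead of going through the virtual init(). One plausible motivation, sketched below: a virtual call made during construction never dispatches past the class whose constructor is running, so an explicit base call states what actually happens.

    #include <iostream>

    struct Base
    {
        Base() { hook(); }  // binds to Base::hook, even for a Derived object
        virtual void hook() { std::cout << "Base::hook\n"; }
        virtual ~Base() = default;
    };

    struct Derived : Base
    {
        void hook() override { std::cout << "Derived::hook\n"; }
    };

    int main()
    {
        Derived d;  // prints "Base::hook": the override is not active yet
        (void)d;
    }
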