Skip to content

Commit 3b7612b

Browse files
Merge branch 'develop' of https://github.com/shogun-toolbox/shogun into develop.
2 parents 66247d7 + 9aadc70 commit 3b7612b

27 files changed

+238
-249
lines changed

examples/meta/generator/targets/python.json

+5-6
Original file line numberDiff line numberDiff line change
@@ -5,14 +5,13 @@
55
"IncludeInterfacedClasses": true,
66
"IncludeEnums": true,
77
"IncludeGlobalFunctions": true,
8-
"DependencyListElement": "from shogun import $typeName",
9-
"DependencyListElementEnum": "from shogun import $value",
8+
"DependencyListElement": "import shogun as sg",
109
"DependencyListSeparator": "\n"
1110
},
1211
"Statement": "$statement\n",
1312
"Comment": "#$comment\n",
1413
"Init": {
15-
"Construct": "$name = $typeName($arguments$kwargs)",
14+
"Construct": "$name = sg.$typeName($arguments$kwargs)",
1615
"Copy": "$name = $expr",
1716
"KeywordArguments": {
1817
"List": "$elements",
@@ -81,10 +80,10 @@
8180
"get_option": "$object.get($arguments)",
8281
"get_string": "$object.get($arguments)"
8382
},
84-
"StaticCall": "$typeName.$method($arguments)",
85-
"GlobalCall": "$method($arguments$kwargs)",
83+
"StaticCall": "sg.$typeName.$method($arguments)",
84+
"GlobalCall": "sg.$method($arguments$kwargs)",
8685
"Identifier": "$identifier",
87-
"Enum":"$value"
86+
"Enum":"sg.$value"
8887
},
8988
"Element": {
9089
"Access": {
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
LibSVMFile f_feats_a("@SHOGUN_DATA@/fm_train_sparsereal.dat")
2+
LibSVMFile f_feats_b("@SHOGUN_DATA@/fm_test_sparsereal.dat")
3+
4+
#![create_features]
5+
Features features_a = features(f_feats_a)
6+
Features features_b = features(f_feats_b)
7+
#![create_features]
8+
9+
#![create_instance]
10+
Distance d = distance("SparseEuclideanDistance", lhs=features_a, rhs=features_a)
11+
#![create_instance]
12+
13+
#![extract_distance]
14+
RealMatrix distance_matrix_aa = d.get_distance_matrix()
15+
#![extract_distance]
16+
17+
#![refresh_distance]
18+
d.init(features_a, features_b)
19+
RealMatrix distance_matrix_ab = d.get_distance_matrix()
20+
#![refresh_distance]
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
File f_feats_train = csv_file("@SHOGUN_DATA@/classifier_4class_2d_linear_features_train.dat")
2+
File f_feats_test = csv_file("@SHOGUN_DATA@/classifier_4class_2d_linear_features_test.dat")
3+
File f_labels_train = csv_file("@SHOGUN_DATA@/classifier_4class_2d_linear_labels_train.dat")
4+
File f_labels_test = csv_file("@SHOGUN_DATA@/classifier_4class_2d_linear_labels_test.dat")
5+
6+
#![create_features]
7+
Features feats_train = features(f_feats_train)
8+
Features feats_test = features(f_feats_test)
9+
Labels labels_train = labels(f_labels_train)
10+
Labels labels_test = labels(f_labels_test)
11+
#![create_features]
12+
13+
#![create_classifier]
14+
Machine svm= machine("MulticlassLibLinear", C=1.0, labels=labels_train)
15+
#![create_classifier]
16+
17+
#![train_and_apply]
18+
svm.train(feats_train)
19+
Labels predicted_labels = svm.apply(feats_test)
20+
#![train_and_apply]
21+
22+
#![evaluate_accuracy]
23+
Evaluation evaluator = evaluation("MulticlassAccuracy")
24+
real accuracy = evaluator.evaluate(predicted_labels,labels_test)
25+
#![evaluate_accuracy]
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
File f_feats_train = csv_file("@SHOGUN_DATA@/classifier_4class_2d_linear_features_train.dat")
2+
File f_labels_train = csv_file("@SHOGUN_DATA@/classifier_4class_2d_linear_labels_train.dat")
3+
4+
#![create_features]
5+
Features feats_train = features(f_feats_train)
6+
Labels labels_train = labels(f_labels_train)
7+
#![create_features]
8+
9+
#![create_classifier]
10+
Machine svm= machine("MulticlassLibLinear", C=1.0, labels=labels_train)
11+
#![create_classifier]
12+
13+
#![train_and_apply]
14+
svm.train(feats_train)
15+
Labels labels_predicted = svm.apply()
16+
RealVector labels = labels_predicted.get_real_vector("labels")
17+
#![train_and_apply]
18+
19+
#![evaluate_accuracy]
20+
Evaluation binary_evaluator = evaluation("ContingencyTableEvaluation", type="ACCURACY")
21+
Evaluation contingency_table_evaluator = evaluation("MulticlassOVREvaluation", binary_evaluation=binary_evaluator)
22+
real mean_accuracy = contingency_table_evaluator.evaluate(labels_predicted,labels_train)
23+
#![evaluate_accuracy]
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
CSVFile f_feats_train("@SHOGUN_DATA@/classifier_4class_2d_linear_features_train.dat")
2+
CSVFile f_feats_test("@SHOGUN_DATA@/classifier_4class_2d_linear_features_test.dat")
3+
CSVFile f_labels_train("@SHOGUN_DATA@/classifier_4class_2d_linear_labels_train.dat")
4+
5+
#![create_features]
6+
Features features_train = features(f_feats_train)
7+
Features features_test = features(f_feats_test)
8+
Labels labels_train = labels(f_labels_train)
9+
#![create_features]
10+
11+
#![create_kernel]
12+
Kernel k = kernel("GaussianKernel", log_width=0.0)
13+
#![create_kernel]
14+
15+
#![choose_strategy]
16+
MulticlassStrategy one_versus_rest=multiclass_strategy("MulticlassOneVsRestStrategy")
17+
#![choose_strategy]
18+
19+
#![create_classifier]
20+
Machine classifier = machine("LibSVM")
21+
#![create_classifier]
22+
23+
#![create_machine]
24+
Machine multiclass_machine = machine("KernelMulticlassMachine", multiclass_strategy=one_versus_rest, kernel=k, machine=classifier, labels=labels_train)
25+
#![create_machine]
26+
27+
#![train_and_apply]
28+
multiclass_machine.train(features_train)
29+
Labels labels_predict = multiclass_machine.apply(features_test)
30+
#![train_and_apply]
31+
32+
#![extract_labels]
33+
RealVector output = labels_predict.get_real_vector("labels")
34+
#![extract_labels]

examples/undocumented/python/classifier_multiclassmachine.py

-30
This file was deleted.

examples/undocumented/python/distance_sparseeuclidean.py

-26
This file was deleted.

examples/undocumented/python/evaluation_multiclassaccuracy.py

-28
This file was deleted.

examples/undocumented/python/evaluation_multiclassovrevaluation.py

-32
This file was deleted.

src/shogun/distributions/KernelDensity.cpp

+6-4
Original file line numberDiff line numberDiff line change
@@ -91,12 +91,14 @@ bool KernelDensity::train(std::shared_ptr<Features> data)
9191
return true;
9292
}
9393

94-
SGVector<float64_t> KernelDensity::get_log_density(const std::shared_ptr<DenseFeatures<float64_t>>& test, int32_t leaf_size)
94+
SGVector<float64_t> KernelDensity::get_log_density(const std::shared_ptr<Features>& test, int32_t leaf_size)
9595
{
9696
require(test,"data not supplied");
97+
auto dense_feat =std::dynamic_pointer_cast<DenseFeatures<float64_t>>(test);
98+
require(dense_feat,"Expected DenseFeatures<float64_t> type");
9799

98100
if ((m_eval==EM_KDTREE_SINGLE) || (m_eval==EM_BALLTREE_SINGLE))
99-
return tree->log_kernel_density(test->get_feature_matrix(),m_kernel_type,m_bandwidth,m_atol,m_rtol);
101+
return tree->log_kernel_density(dense_feat->get_feature_matrix(),m_kernel_type,m_bandwidth,m_atol,m_rtol);
100102

101103
std::shared_ptr<CNbodyTree> query_tree=NULL;
102104
if (m_eval==EM_KDTREE_DUAL)
@@ -106,7 +108,7 @@ SGVector<float64_t> KernelDensity::get_log_density(const std::shared_ptr<DenseFe
106108
else
107109
error("Evaluation mode not identified");
108110

109-
query_tree->build_tree(test);
111+
query_tree->build_tree(dense_feat);
110112
std::shared_ptr<BinaryTreeMachineNode<NbodyTreeNodeData>> qroot=NULL;
111113
auto root=query_tree->get_root();
112114
if (root)
@@ -115,7 +117,7 @@ SGVector<float64_t> KernelDensity::get_log_density(const std::shared_ptr<DenseFe
115117
error("Query tree root not found!");
116118

117119
SGVector<index_t> qid=query_tree->get_rearranged_vector_ids();
118-
SGVector<float64_t> ret=tree->log_kernel_density_dual(test->get_feature_matrix(),qid,qroot,m_kernel_type,m_bandwidth,m_atol,m_rtol);
120+
SGVector<float64_t> ret=tree->log_kernel_density_dual(dense_feat->get_feature_matrix(),qid,qroot,m_kernel_type,m_bandwidth,m_atol,m_rtol);
119121

120122

121123

src/shogun/distributions/KernelDensity.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ public :
9797
* @param leaf_size leaf size of query tree (ignored in case of single tree evaluation mode)
9898
* @return log of estimated kernel density values at given test points
9999
*/
100-
SGVector<float64_t> get_log_density(const std::shared_ptr<DenseFeatures<float64_t>>& test, int32_t leaf_size=1);
100+
SGVector<float64_t> get_log_density(const std::shared_ptr<Features>& test, int32_t leaf_size=1);
101101

102102
/** return number of model parameters
103103
* NOT IMPLEMENTED

src/shogun/lib/SGVector.cpp

-24
Original file line numberDiff line numberDiff line change
@@ -338,30 +338,6 @@ SGVector<T> SGVector<T>::slice(index_t l, index_t h) const
338338
return SGVector<T>(vector, h - l, l);
339339
}
340340

341-
/** addition operator */
342-
template<class T>
343-
SGVector<T> SGVector<T>::operator+ (SGVector<T> x)
344-
{
345-
assert_on_cpu();
346-
require(x.vector && vector, "Addition possible for only non-null vectors.");
347-
require(x.vlen == vlen, "Length of the two vectors to be added should be same. [V({}) + V({})]", vlen, x.vlen);
348-
349-
SGVector<T> result=clone();
350-
result.add(x);
351-
return result;
352-
}
353-
354-
template<class T>
355-
void SGVector<T>::add(const SGVector<T> x)
356-
{
357-
assert_on_cpu();
358-
require(x.vector && vector, "Addition possible for only non-null vectors.");
359-
require(x.vlen == vlen, "Length of the two vectors to be added should be same. [V({}) + V({})]", vlen, x.vlen);
360-
361-
for (int32_t i=0; i<vlen; i++)
362-
vector[i]+=x.vector[i];
363-
}
364-
365341
template<class T>
366342
void SGVector<T>::add(const T x)
367343
{

src/shogun/lib/SGVector.h

-23
Original file line numberDiff line numberDiff line change
@@ -371,12 +371,6 @@ template<class T> class SGVector : public SGReferencedData
371371
return vector[index];
372372
}
373373

374-
/** Add vector to current vector
375-
*
376-
* @param x add vector x to current vector
377-
*/
378-
void add(const SGVector<T> x);
379-
380374
/** Add sparse vector to current vector
381375
*
382376
* @param x add sparse vector x to current vector
@@ -389,23 +383,6 @@ template<class T> class SGVector : public SGReferencedData
389383
*/
390384
void add(const T x);
391385

392-
/** Addition operator */
393-
SGVector<T> operator+ (SGVector<T> x);
394-
395-
/** Inplace addition operator */
396-
SGVector<T> operator+= (SGVector<T> x)
397-
{
398-
add(x);
399-
return *this;
400-
}
401-
402-
/** Inplace addition operator for sparse vector */
403-
SGVector<T> operator+= (SGSparseVector<T>& x)
404-
{
405-
add(x);
406-
return *this;
407-
}
408-
409386
/** Equals method up to precision for vectors (element-wise)
410387
* @param other vector to compare with
411388
* @return false if any element differs or if sizes are different,

src/shogun/mathematics/linalg/LinalgBackendEigen.h

+2
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,8 @@ namespace shogun
5555
Type beta, Container<Type>& result) const;
5656
DEFINE_FOR_NUMERIC_PTYPE(BACKEND_GENERIC_IN_PLACE_ADD, SGVector)
5757
DEFINE_FOR_NUMERIC_PTYPE(BACKEND_GENERIC_IN_PLACE_ADD, SGMatrix)
58+
BACKEND_GENERIC_IN_PLACE_ADD(complex128_t, SGVector);
59+
BACKEND_GENERIC_IN_PLACE_ADD(complex128_t, SGMatrix);
5860
#undef BACKEND_GENERIC_IN_PLACE_ADD
5961

6062
/** Implementation of @see LinalgBackendBase::add_col_vec */

src/shogun/mathematics/linalg/backend/eigen/BasicOps.cpp

+2
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,8 @@ using namespace shogun;
4545
}
4646
DEFINE_FOR_NUMERIC_PTYPE(BACKEND_GENERIC_IN_PLACE_ADD, SGVector)
4747
DEFINE_FOR_NUMERIC_PTYPE(BACKEND_GENERIC_IN_PLACE_ADD, SGMatrix)
48+
BACKEND_GENERIC_IN_PLACE_ADD(complex128_t, SGVector);
49+
BACKEND_GENERIC_IN_PLACE_ADD(complex128_t, SGMatrix);
4850
#undef BACKEND_GENERIC_IN_PLACE_ADD
4951

5052
#define BACKEND_GENERIC_ADD_COL_VEC(Type, Container) \

src/shogun/mathematics/linalg/ratapprox/logdet/opfunc/LogRationalApproximationIndividual.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ LogRationalApproximationIndividual::compute(SGVector<float64_t> sample) const
127127
v *= m_weights[i];
128128
v = -v;
129129
// aggregate the result
130-
agg += vec;
130+
linalg::add(agg, vec, agg);
131131
}
132132
float64_t result =
133133
linalg::dot(sample, m_linear_operator->apply(agg.get_imag()));

0 commit comments

Comments
 (0)