
Commit

Rename dot product functions (#23)
devfacet authored Apr 15, 2024
1 parent 606b613 commit 1e3db55
Showing 21 changed files with 59 additions and 59 deletions.
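In short, the commit shortens the generic dot product identifiers: nn_dot_product becomes nn_dot_prod, the NNDotProductFunction pointer type becomes NNDotProdFunc, NN_DOT_PRODUCT_MAX_VECTOR_SIZE becomes NN_DOT_PROD_MAX_VECTOR_SIZE, and the neuron setter nn_neuron_set_dot_product_func becomes nn_neuron_set_dot_prod_func, while the architecture-specific nn_dot_product_cmsis and nn_dot_product_neon, and the layer setter nn_layer_set_dot_product_func, keep their names. A minimal sketch of the renamed API in use (illustrative values; assumes include/nn_dot_product.h from this repository is on the include path):

#include <stdio.h>
#include "nn_dot_product.h"

int main(void) {
    // Assign the renamed function to the renamed function-pointer type.
    NNDotProdFunc dot = nn_dot_prod;        // was: NNDotProductFunction / nn_dot_product
    float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};  // illustrative inputs
    float b[4] = {0.5f, 0.5f, 0.5f, 0.5f};
    printf("dot = %f\n", dot(a, b, 4));     // 0.5 + 1.0 + 1.5 + 2.0 = 5.0
    return 0;
}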
2 changes: 1 addition & 1 deletion examples/arch/generic/layer/main.c
@@ -34,7 +34,7 @@ int main() {
}

// Set the dot product function of the layer
if (!nn_layer_set_dot_product_func(&layer, nn_dot_product, &error)) {
if (!nn_layer_set_dot_product_func(&layer, nn_dot_prod, &error)) {
fprintf(stderr, "error: %s\n", error.message);
return 1;
}
8 changes: 4 additions & 4 deletions include/arch/arm/cmsis-dsp/nn_dot_product.h
@@ -3,12 +3,12 @@

#include <stddef.h>

// NN_DOT_PRODUCT_MAX_VECTOR_SIZE defines the maximum size of a vector.
#ifndef NN_DOT_PRODUCT_MAX_VECTOR_SIZE
#define NN_DOT_PRODUCT_MAX_VECTOR_SIZE 64
// NN_DOT_PROD_MAX_VECTOR_SIZE defines the maximum size of a vector.
#ifndef NN_DOT_PROD_MAX_VECTOR_SIZE
#define NN_DOT_PROD_MAX_VECTOR_SIZE 64
#endif

// nn_dot_product_cmsis calculates the dot product of two vectors.
float nn_dot_product_cmsis(const float a[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], const float b[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], size_t vector_size);
float nn_dot_product_cmsis(const float a[NN_DOT_PROD_MAX_VECTOR_SIZE], const float b[NN_DOT_PROD_MAX_VECTOR_SIZE], size_t vector_size);

#endif // NN_DOT_PRODUCT_CMSIS_H
8 changes: 4 additions & 4 deletions include/arch/arm/neon/nn_dot_product.h
@@ -3,12 +3,12 @@

#include <stddef.h>

// NN_DOT_PRODUCT_MAX_VECTOR_SIZE defines the maximum size of a vector.
#ifndef NN_DOT_PRODUCT_MAX_VECTOR_SIZE
#define NN_DOT_PRODUCT_MAX_VECTOR_SIZE 64
// NN_DOT_PROD_MAX_VECTOR_SIZE defines the maximum size of a vector.
#ifndef NN_DOT_PROD_MAX_VECTOR_SIZE
#define NN_DOT_PROD_MAX_VECTOR_SIZE 64
#endif

// nn_dot_product_neon calculates the dot product of two vectors.
float nn_dot_product_neon(const float a[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], const float b[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], size_t vector_size);
float nn_dot_product_neon(const float a[NN_DOT_PROD_MAX_VECTOR_SIZE], const float b[NN_DOT_PROD_MAX_VECTOR_SIZE], size_t vector_size);

#endif // NN_DOT_PRODUCT_NEON_H
14 changes: 7 additions & 7 deletions include/nn_dot_product.h
@@ -3,15 +3,15 @@

#include <stddef.h>

// NN_DOT_PRODUCT_MAX_VECTOR_SIZE defines the maximum size of a vector.
#ifndef NN_DOT_PRODUCT_MAX_VECTOR_SIZE
#define NN_DOT_PRODUCT_MAX_VECTOR_SIZE 64
// NN_DOT_PROD_MAX_VECTOR_SIZE defines the maximum size of a vector.
#ifndef NN_DOT_PROD_MAX_VECTOR_SIZE
#define NN_DOT_PROD_MAX_VECTOR_SIZE 64
#endif

// NNDotProductFunction represents a function that calculates the dot product of two vectors.
typedef float (*NNDotProductFunction)(const float a[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], const float b[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], size_t vector_size);
// NNDotProdFunc represents a function that calculates the dot product of two vectors.
typedef float (*NNDotProdFunc)(const float a[NN_DOT_PROD_MAX_VECTOR_SIZE], const float b[NN_DOT_PROD_MAX_VECTOR_SIZE], size_t vector_size);

// nn_dot_product calculates the dot product of two vectors.
float nn_dot_product(const float a[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], const float b[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], size_t vector_size);
// nn_dot_prod calculates the dot product of two vectors.
float nn_dot_prod(const float a[NN_DOT_PROD_MAX_VECTOR_SIZE], const float b[NN_DOT_PROD_MAX_VECTOR_SIZE], size_t vector_size);

#endif // NN_DOT_PRODUCT_H
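The body of the generic nn_dot_prod is only partially visible further down in src/nn_dot_product.c, so purely as an illustration of the declaration above, a straightforward portable implementation (an assumption about the actual code, and omitting the NN_DEBUG_PRINT call the real file makes) would be:

#include <stddef.h>
#include "nn_dot_product.h"

// Sketch: accumulate a[i] * b[i] over the first vector_size elements.
float nn_dot_prod(const float a[NN_DOT_PROD_MAX_VECTOR_SIZE], const float b[NN_DOT_PROD_MAX_VECTOR_SIZE], size_t vector_size) {
    float sum = 0.0f;
    for (size_t i = 0; i < vector_size; ++i) {
        sum += a[i] * b[i];
    }
    return sum;
}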
4 changes: 2 additions & 2 deletions include/nn_layer.h
@@ -39,7 +39,7 @@ typedef struct {
size_t output_size;
float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
float biases[NN_LAYER_MAX_BIASES];
NNDotProductFunction dot_product_func;
NNDotProdFunc dot_product_func;
} NNLayer;

// nn_layer_init initializes a layer with the given arguments.
@@ -58,7 +58,7 @@ bool nn_layer_set_weights(NNLayer *layer, const float weights[NN_LAYER_MAX_OUTPU
bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES], NNError *error);

// nn_layer_set_dot_product_func sets the dot product function of the given layer.
bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProductFunction dot_product_func, NNError *error);
bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProdFunc dot_product_func, NNError *error);

// nn_layer_forward computes the given layer with the given inputs and stores the result in outputs.
bool nn_layer_forward(const NNLayer *layer, const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE], float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE], size_t batch_size, NNError *error);
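nn_layer_forward's body is not part of this diff; purely as an illustration of how an NNDotProdFunc is consumed by a layer, here is a hedged sketch that assumes the conventional formulation outputs[i][j] = dot(inputs[i], weights[j]) + biases[j], that NNLayer also carries an input_size field (not visible in the hunk above), and that activation handling happens elsewhere:

#include <stdbool.h>
#include <stddef.h>
#include "nn_layer.h"

// Hypothetical helper, not the library's nn_layer_forward.
static bool layer_forward_sketch(const NNLayer *layer,
                                 const float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE],
                                 float outputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_OUTPUT_SIZE],
                                 size_t batch_size) {
    if (layer == NULL || layer->dot_product_func == NULL) {
        return false;
    }
    for (size_t i = 0; i < batch_size; ++i) {
        for (size_t j = 0; j < layer->output_size; ++j) {
            // One dot product per output unit, plus its bias.
            outputs[i][j] = layer->dot_product_func(inputs[i], layer->weights[j], layer->input_size) + layer->biases[j];
        }
    }
    return true;
}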
6 changes: 3 additions & 3 deletions include/nn_neuron.h
@@ -19,7 +19,7 @@ typedef struct {
float weights[NN_NEURON_MAX_WEIGHTS];
size_t input_size;
float bias;
NNDotProductFunction dot_product_func;
NNDotProdFunc dot_product_func;
NNActFuncScalar act_func;
} NNNeuron;

@@ -32,8 +32,8 @@ bool nn_neuron_set_weights(NNNeuron *neuron, const float weights[NN_NEURON_MAX_W
// nn_neuron_set_bias sets the bias of the given neuron.
bool nn_neuron_set_bias(NNNeuron *neuron, float bias, NNError *error);

// nn_neuron_set_dot_product_func sets the dot product function of the given neuron.
bool nn_neuron_set_dot_product_func(NNNeuron *neuron, NNDotProductFunction dot_product_func, NNError *error);
// nn_neuron_set_dot_prod_func sets the dot product function of the given neuron.
bool nn_neuron_set_dot_prod_func(NNNeuron *neuron, NNDotProdFunc dot_product_func, NNError *error);

// nn_neuron_set_act_func sets the activation function of the given neuron.
bool nn_neuron_set_act_func(NNNeuron *neuron, NNActFuncScalar act_func, NNError *error);
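The neuron-level setter is renamed from nn_neuron_set_dot_product_func to nn_neuron_set_dot_prod_func; the test files further down exercise it as follows. The weights, input size, and bias below are illustrative, and the line setting an activation function via nn_neuron_set_act_func is omitted because the header declaring nn_act_func_identity is not part of this diff:

#include <assert.h>
#include "nn_neuron.h"
#include "nn_dot_product.h"

void wire_up_neuron_sketch(void) {
    NNNeuron neuron;
    NNError error;
    float weights[NN_NEURON_MAX_WEIGHTS] = {0.2f, 0.8f, -0.5f, 1.0f};  // illustrative

    nn_neuron_init(&neuron, weights, 4, 2.0f, &error);
    assert(error.code == NN_ERROR_NONE);
    nn_neuron_set_dot_prod_func(&neuron, nn_dot_prod, &error);         // renamed setter
    assert(error.code == NN_ERROR_NONE);
}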
2 changes: 1 addition & 1 deletion src/arch/arm/cmsis-dsp/nn_dot_product.c
@@ -4,7 +4,7 @@
#include <stddef.h>

// nn_dot_product_cmsis calculates the dot product of two vectors.
float nn_dot_product_cmsis(const float a[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], const float b[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], size_t vector_size) {
float nn_dot_product_cmsis(const float a[NN_DOT_PROD_MAX_VECTOR_SIZE], const float b[NN_DOT_PROD_MAX_VECTOR_SIZE], size_t vector_size) {
NN_DEBUG_PRINT(5, "function %s called with vector_size = %zu\n", __func__, vector_size);

float result = 0.0f;
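Only the declaration and the result initialization of nn_dot_product_cmsis are visible above; CMSIS-DSP's standard primitive for this operation is arm_dot_prod_f32, so a plausible completion of the body (an assumption, not taken from the repository) looks like:

#include <stddef.h>
#include <stdint.h>
#include "arm_math.h"  // CMSIS-DSP

// Assumed sketch of the CMSIS-DSP-backed dot product.
float dot_product_cmsis_sketch(const float *a, const float *b, size_t vector_size) {
    float result = 0.0f;
    arm_dot_prod_f32(a, b, (uint32_t)vector_size, &result);  // result = sum of a[i] * b[i]
    return result;
}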
2 changes: 1 addition & 1 deletion src/arch/arm/neon/nn_dot_product.c
@@ -4,7 +4,7 @@
#include <stddef.h>

// nn_dot_product_neon calculates the dot product of two vectors.
float nn_dot_product_neon(const float a[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], const float b[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], size_t vector_size) {
float nn_dot_product_neon(const float a[NN_DOT_PROD_MAX_VECTOR_SIZE], const float b[NN_DOT_PROD_MAX_VECTOR_SIZE], size_t vector_size) {
NN_DEBUG_PRINT(5, "function %s called with vector_size = %zu\n", __func__, vector_size);

// Initialize vector sum to 0
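Likewise, only the prologue of nn_dot_product_neon is shown ("Initialize vector sum to 0"); the kind of AArch64 NEON loop such a function typically uses looks like the following sketch, which is an assumption about the implementation rather than code from the repository:

#include <stddef.h>
#include <arm_neon.h>

// Assumed NEON sketch: four floats per iteration, scalar tail for the remainder.
float dot_prod_neon_sketch(const float *a, const float *b, size_t vector_size) {
    float32x4_t sum_vec = vdupq_n_f32(0.0f);   // vector sum initialized to 0
    size_t i = 0;
    for (; i + 4 <= vector_size; i += 4) {
        float32x4_t va = vld1q_f32(a + i);
        float32x4_t vb = vld1q_f32(b + i);
        sum_vec = vmlaq_f32(sum_vec, va, vb);  // sum_vec += va * vb
    }
    float sum = vaddvq_f32(sum_vec);           // horizontal add (AArch64 only)
    for (; i < vector_size; ++i) {
        sum += a[i] * b[i];
    }
    return sum;
}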
2 changes: 1 addition & 1 deletion src/nn_dot_product.c
@@ -3,7 +3,7 @@
#include <stddef.h>

// nn_dot_prod calculates the dot product of two vectors.
float nn_dot_product(const float a[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], const float b[NN_DOT_PRODUCT_MAX_VECTOR_SIZE], size_t vector_size) {
float nn_dot_prod(const float a[NN_DOT_PROD_MAX_VECTOR_SIZE], const float b[NN_DOT_PROD_MAX_VECTOR_SIZE], size_t vector_size) {
NN_DEBUG_PRINT(5, "function %s called with vector_size = %zu\n", __func__, vector_size);

// Initialize vector sum to 0
2 changes: 1 addition & 1 deletion src/nn_layer.c
@@ -94,7 +94,7 @@ bool nn_layer_set_biases(NNLayer *layer, const float biases[NN_LAYER_MAX_BIASES]
}

// nn_layer_set_dot_product_func sets the dot product function of the given layer.
bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProductFunction dot_product_func, NNError *error) {
bool nn_layer_set_dot_product_func(NNLayer *layer, NNDotProdFunc dot_product_func, NNError *error) {
nn_error_set(error, NN_ERROR_NONE, NULL);
if (layer == NULL) {
nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "layer is NULL");
4 changes: 2 additions & 2 deletions src/nn_neuron.c
@@ -48,8 +48,8 @@ bool nn_neuron_set_bias(NNNeuron *neuron, float bias, NNError *error) {
return true;
}

// nn_neuron_set_dot_product_func sets the dot product function of the given neuron.
bool nn_neuron_set_dot_product_func(NNNeuron *neuron, NNDotProductFunction dot_product_func, NNError *error) {
// nn_neuron_set_dot_prod_func sets the dot product function of the given neuron.
bool nn_neuron_set_dot_prod_func(NNNeuron *neuron, NNDotProdFunc dot_product_func, NNError *error) {
nn_error_set(error, NN_ERROR_NONE, NULL);
if (neuron == NULL) {
nn_error_set(error, NN_ERROR_INVALID_INSTANCE, "neuron is NULL");
6 changes: 3 additions & 3 deletions tests/arch/arm/cmsis-dsp/neuron/main.c
@@ -17,21 +17,21 @@ typedef struct {
float weights[NN_NEURON_MAX_WEIGHTS];
size_t input_size;
float bias;
NNDotProductFunction dot_product_func;
NNDotProdFunc dot_product_func;
float output_tolerance;
float expected_output;
} TestCase;

// run_test_cases runs the test cases.
void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProductFunction dot_product_func) {
void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
for (int i = 0; i < n_cases; ++i) {
TestCase tc = test_cases[i];
NNNeuron neuron;
NNError error;

nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
assert(error.code == NN_ERROR_NONE);
nn_neuron_set_dot_product_func(&neuron, dot_product_func, &error);
nn_neuron_set_dot_prod_func(&neuron, dot_product_func, &error);
assert(error.code == NN_ERROR_NONE);
nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error);
assert(error.code == NN_ERROR_NONE);
2 changes: 1 addition & 1 deletion tests/arch/arm/cmsis-dsp/neuron_perf/main.c
@@ -38,7 +38,7 @@ int main(int argc, char *argv[]) {
} else if (!nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error)) {
printf("error: %s\n", error.message);
return 1;
} else if (!nn_neuron_set_dot_product_func(&neuron, nn_dot_product_cmsis, &error)) {
} else if (!nn_neuron_set_dot_prod_func(&neuron, nn_dot_product_cmsis, &error)) {
printf("error: %s\n", error.message);
return 1;
}
6 changes: 3 additions & 3 deletions tests/arch/arm/neon/neuron/main.c
@@ -17,21 +17,21 @@ typedef struct {
float weights[NN_NEURON_MAX_WEIGHTS];
size_t input_size;
float bias;
NNDotProductFunction dot_product_func;
NNDotProdFunc dot_product_func;
float output_tolerance;
float expected_output;
} TestCase;

// run_test_cases runs the test cases.
void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProductFunction dot_product_func) {
void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
for (int i = 0; i < n_cases; ++i) {
TestCase tc = test_cases[i];
NNNeuron neuron;
NNError error;

nn_neuron_init(&neuron, tc.weights, tc.input_size, tc.bias, &error);
assert(error.code == NN_ERROR_NONE);
nn_neuron_set_dot_product_func(&neuron, dot_product_func, &error);
nn_neuron_set_dot_prod_func(&neuron, dot_product_func, &error);
assert(error.code == NN_ERROR_NONE);
nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error);
assert(error.code == NN_ERROR_NONE);
2 changes: 1 addition & 1 deletion tests/arch/arm/neon/neuron_perf/main.c
@@ -39,7 +39,7 @@ int main(int argc, char *argv[]) {
} else if (!nn_neuron_set_act_func(&neuron, nn_act_func_identity, &error)) {
printf("error: %s\n", error.message);
return 1;
} else if (!nn_neuron_set_dot_product_func(&neuron, nn_dot_product_neon, &error)) {
} else if (!nn_neuron_set_dot_prod_func(&neuron, nn_dot_product_neon, &error)) {
printf("error: %s\n", error.message);
return 1;
}
6 changes: 3 additions & 3 deletions tests/arch/generic/dot_product/main.c
@@ -16,13 +16,13 @@ typedef struct {
float b[4];
size_t vector_size;
float bias;
NNDotProductFunction dot_product_func;
NNDotProdFunc dot_product_func;
float output_tolerance;
float expected_output;
} TestCase;

// run_test_cases runs the test cases.
void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProductFunction dot_product_func) {
void run_test_cases(TestCase *test_cases, int n_cases, char *info, NNDotProdFunc dot_product_func) {
for (int i = 0; i < n_cases; ++i) {
TestCase tc = test_cases[i];

@@ -68,6 +68,6 @@ int main() {
},

};
run_test_cases(test_cases, N_TEST_CASES, "nn_dot_product", nn_dot_product);
run_test_cases(test_cases, N_TEST_CASES, "nn_dot_prod", nn_dot_prod);
return 0;
}
4 changes: 2 additions & 2 deletions tests/arch/generic/dot_product_perf/main.c
@@ -26,11 +26,11 @@ int main(int argc, char *argv[]) {
// Benchmark
for (int i = 0; i < n_runs; ++i) {
clock_gettime(CLOCK_MONOTONIC, &start);
nn_dot_product(a, b, n_vectors);
nn_dot_prod(a, b, n_vectors);
clock_gettime(CLOCK_MONOTONIC, &end);
total_time += nn_timespec_diff_ns(&start, &end);
}
printf("avg_time_ns=%lld total_time_ms=%lld info=nn_dot_product\n", total_time / n_runs, total_time / 1000000);
printf("avg_time_ns=%lld total_time_ms=%lld info=nn_dot_prod\n", total_time / n_runs, total_time / 1000000);

return 0;
}
20 changes: 10 additions & 10 deletions tests/arch/generic/layer/main.c
@@ -18,7 +18,7 @@ typedef struct {
size_t output_size;
float weights[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
float biases[NN_LAYER_MAX_BIASES];
NNDotProductFunction dot_product_func;
NNDotProdFunc dot_product_func;
NNActFuncScalar act_func_scalar;
NNActFuncVector act_func_vector;
size_t batch_size;
@@ -77,7 +77,7 @@ int main() {
{-0.3f, 0.4f, 0.2f, -0.5f},
},
.biases = {0.5f, -0.1f, 0.2f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 2,
.inputs = {
@@ -100,7 +100,7 @@ int main() {
{-0.3f, 0.4f, 0.2f, -0.5f},
},
.biases = {0.5f, -0.1f, 0.2f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_scalar = nn_act_func_relu,
.batch_size = 2,
.inputs = {
@@ -123,7 +123,7 @@ int main() {
{-0.3f, 0.4f, 0.2f, -0.5f},
},
.biases = {0.5f, -0.1f, 0.2f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_vector = nn_act_func_softmax,
.batch_size = 2,
.inputs = {
@@ -146,7 +146,7 @@ int main() {
{0.4f, 0.1f, -0.4f, 0.6f},
},
.biases = {1.0f, 0.5f, -0.2f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 2,
.inputs = {
@@ -169,7 +169,7 @@ int main() {
{0.4f, 0.1f, -0.4f, 0.6f},
},
.biases = {1.0f, 0.5f, -0.2f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_scalar = nn_act_func_relu,
.batch_size = 2,
.inputs = {
@@ -192,7 +192,7 @@ int main() {
{0.4f, 0.1f, -0.4f, 0.6f},
},
.biases = {1.0f, 0.5f, -0.2f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_vector = nn_act_func_softmax,
.batch_size = 2,
.inputs = {
@@ -215,7 +215,7 @@ int main() {
{0.1f, 0.4f, 0.2f, -0.2f},
},
.biases = {0.2f, -0.3f, 0.4f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 3,
.inputs = {
@@ -240,7 +240,7 @@ int main() {
{0.1f, 0.4f, 0.2f, -0.2f},
},
.biases = {0.2f, -0.3f, 0.4f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_vector = nn_act_func_softmax,
.batch_size = 3,
.inputs = {
@@ -265,7 +265,7 @@ int main() {
{0.1f, 0.4f, 0.2f, -0.2f},
},
.biases = {0.2f, -0.3f, 0.4f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_scalar = nn_act_func_relu,
.batch_size = 3,
.inputs = {
8 changes: 4 additions & 4 deletions tests/arch/generic/layer_multi/main.c
@@ -20,7 +20,7 @@ typedef struct {
float biases[NN_LAYER_MAX_BIASES];
float weights2[NN_LAYER_MAX_OUTPUT_SIZE][NN_LAYER_MAX_INPUT_SIZE];
float biases2[NN_LAYER_MAX_BIASES];
NNDotProductFunction dot_product_func;
NNDotProdFunc dot_product_func;
NNActFuncScalar act_func_scalar;
size_t batch_size;
float inputs[NN_LAYER_MAX_BATCH_SIZE][NN_LAYER_MAX_INPUT_SIZE];
@@ -86,7 +86,7 @@ int main() {
{0.5f, -0.9f, 0.1f},
},
.biases2 = {0.5f, 1.5f, -0.2f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 3,
.inputs = {
@@ -116,7 +116,7 @@ int main() {
{0.13f, -0.31f, 0.11f},
},
.biases2 = {-0.1f, 1.0f, 0.2f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 3,
.inputs = {
@@ -146,7 +146,7 @@ int main() {
{-0.35f, 0.62f, -0.2f},
},
.biases2 = {0.7f, -1.1f, 0.3f},
.dot_product_func = nn_dot_product,
.dot_product_func = nn_dot_prod,
.act_func_scalar = nn_act_func_identity,
.batch_size = 3,
.inputs = {
