Improve tests (#29)
* Improve tests

* Add _POSIX_C_SOURCE back
devfacet committed Jul 15, 2024
1 parent efe5241 commit 7398af7
Showing 38 changed files with 958 additions and 20 deletions.
34 changes: 34 additions & 0 deletions scripts/test/gen/nn_act_func.py
@@ -0,0 +1,34 @@
# This script generates test cases for the nn_act_func function.

import numpy as np

# Returns the softmax activation function result.
def nn_act_func_softmax(x):
    exp_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return exp_x / np.sum(exp_x, axis=-1, keepdims=True)

# Generates a test case.
def generate_test_case(input):
    input_c = ", ".join(map(str, input.flatten()))
    expected_value = nn_act_func_softmax(input)
    expected_value_c = ", ".join(map(str, expected_value.flatten()))
    return f"""
    {{
        .act_func = nn_act_func_init(NN_ACT_FUNC_TENSOR, nn_act_func_softmax),
        .input = nn_tensor_init_NNTensor(2, (const size_t[]){{1, {len(input)}}}, false, (const NNTensorUnit[]){{{input_c}}}, NULL),
        .expected_value = nn_tensor_init_NNTensor(2, (const size_t[]){{1, {len(input)}}}, false, (const NNTensorUnit[]){{{expected_value_c}}}, NULL),
        .expected_tolerance = default_expected_tolerance,
    }}"""

# Generate test cases
np.random.seed(2024)
test_cases = []
inputs = [
    np.array([0.8, 0.2, 0.1]),
    np.array([-0.6, 0.0, 0.6]),
    np.array([0.3, -0.3, 0.0])
]
for input in inputs:
    test_cases.append(generate_test_case(input))

print(f"TestCase test_cases[] = {{{', '.join(test_cases)},\n}};")
22 changes: 22 additions & 0 deletions scripts/test/gen/nn_act_func_identity.py
@@ -0,0 +1,22 @@
# This script generates test cases for nn_act_func_identity function.

import numpy as np

# Generates a test case.
def generate_test_case(input):
    return f"""
    {{
        .act_func = nn_act_func_init(NN_ACT_FUNC_SCALAR, nn_act_func_identity),
        .input = {input},
        .expected_value = {input},
        .expected_tolerance = default_expected_tolerance,
    }}"""

# Generate test cases
np.random.seed(2024)
test_cases = []
inputs = [-1.0, 0.0, 1.0]
for input in inputs:
    test_cases.append(generate_test_case(input))

print(f"TestCase test_cases[] = {{{', '.join(test_cases)},\n}};")
27 changes: 27 additions & 0 deletions scripts/test/gen/nn_act_func_relu.py
@@ -0,0 +1,27 @@
# This script generates test cases for nn_act_func_relu function.

import numpy as np

# Returns the ReLU activation function result.
def nn_act_func_relu(x):
    return np.maximum(0, x)

# Generates a test case.
def generate_test_case(input):
    expected_value = nn_act_func_relu(input)
    return f"""
    {{
        .act_func = nn_act_func_init(NN_ACT_FUNC_SCALAR, nn_act_func_relu),
        .input = {input},
        .expected_value = {expected_value},
        .expected_tolerance = default_expected_tolerance,
    }}"""

# Generate test cases
np.random.seed(2024)
test_cases = []
inputs = [-2.0, -1.0, 0.0, 1.0, 2.0]
for input in inputs:
    test_cases.append(generate_test_case(input))

print(f"TestCase test_cases[] = {{{', '.join(test_cases)},\n}};")
33 changes: 33 additions & 0 deletions scripts/test/gen/nn_act_func_scalar_batch.py
@@ -0,0 +1,33 @@
# This script generates test cases for the nn_act_func_scalar_batch function.

import numpy as np

# Returns the ReLU activation function result.
def nn_act_func_relu(x):
    return np.maximum(0, x)

# Generates a test case.
def generate_test_case(input):
    input_c = ", ".join(map(str, input.flatten()))
    expected_value = nn_act_func_relu(input)
    expected_value_c = ", ".join(map(str, expected_value.flatten()))
    return f"""
    {{
        .act_func = nn_act_func_init(NN_ACT_FUNC_TENSOR, nn_act_func_relu),
        .input = nn_tensor_init_NNTensor(2, (const size_t[]){{1, {len(input)}}}, false, (const NNTensorUnit[]){{{input_c}}}, NULL),
        .expected_value = nn_tensor_init_NNTensor(2, (const size_t[]){{1, {len(input)}}}, false, (const NNTensorUnit[]){{{expected_value_c}}}, NULL),
        .expected_tolerance = default_expected_tolerance,
    }}"""

# Generate test cases
np.random.seed(2024)
test_cases = []
inputs = [
    np.array([0.8, 0.2, -0.1, 0.0]),
    np.array([-0.6, 0.0, 0.6, -1.0]),
    np.array([0.3, -0.3, 1.0, 0.0])
]
for input in inputs:
    test_cases.append(generate_test_case(input))

print(f"TestCase test_cases[] = {{{', '.join(test_cases)},\n}};")
27 changes: 27 additions & 0 deletions scripts/test/gen/nn_act_func_sigmoid.py
@@ -0,0 +1,27 @@
# This script generates test cases for nn_act_func_sigmoid function.

import numpy as np

# Returns the sigmoid activation function result.
def nn_act_func_sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Generates a test case.
def generate_test_case(input):
    expected_value = nn_act_func_sigmoid(input)
    return f"""
    {{
        .act_func = nn_act_func_init(NN_ACT_FUNC_SCALAR, nn_act_func_sigmoid),
        .input = {input},
        .expected_value = {expected_value},
        .expected_tolerance = default_expected_tolerance,
    }}"""

# Generate test cases
np.random.seed(2024)
test_cases = []
inputs = [-2.0, -1.0, 0.0, 1.0, 2.0]
for input in inputs:
    test_cases.append(generate_test_case(input))

print(f"TestCase test_cases[] = {{{', '.join(test_cases)},\n}};")
34 changes: 34 additions & 0 deletions scripts/test/gen/nn_act_func_softmax.py
@@ -0,0 +1,34 @@
# This script generates test cases for nn_act_func_softmax function.

import numpy as np

# Returns the softmax activation function result.
def nn_act_func_softmax(x):
    exp_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return exp_x / np.sum(exp_x, axis=-1, keepdims=True)

# Generates a test case.
def generate_test_case(input):
    input_c = ", ".join(map(str, input.flatten()))
    expected_value = nn_act_func_softmax(input)
    expected_value_c = ", ".join(map(str, expected_value.flatten()))
    return f"""
    {{
        .act_func = nn_act_func_init(NN_ACT_FUNC_TENSOR, nn_act_func_softmax),
        .input = nn_tensor_init_NNTensor(1, (const size_t[]){{{len(input)}}}, false, (const NNTensorUnit[]){{{input_c}}}, NULL),
        .expected_value = nn_tensor_init_NNTensor(1, (const size_t[]){{{len(input)}}}, false, (const NNTensorUnit[]){{{expected_value_c}}}, NULL),
        .expected_tolerance = default_expected_tolerance,
    }}"""

# Generate test cases
np.random.seed(2024)
test_cases = []
inputs = [
    np.array([1.0, 2.0, 3.0]),
    np.array([-1.0, 0.0, 1.0]),
    np.array([0.5, -0.5, 0.0])
]
for input in inputs:
    test_cases.append(generate_test_case(input))

print(f"TestCase test_cases[] = {{{', '.join(test_cases)},\n}};")
34 changes: 34 additions & 0 deletions scripts/test/gen/nn_act_func_tensor_batch.py
@@ -0,0 +1,34 @@
# This script generates test cases for the nn_act_func_tensor_batch function.

import numpy as np

# Returns the softmax activation function result.
def nn_act_func_softmax(x):
    exp_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return exp_x / np.sum(exp_x, axis=-1, keepdims=True)

# Generates a test case.
def generate_test_case(input):
    input_c = ", ".join(map(str, input.flatten()))
    expected_value = nn_act_func_softmax(input)
    expected_value_c = ", ".join(map(str, expected_value.flatten()))
    return f"""
    {{
        .act_func = nn_act_func_init(NN_ACT_FUNC_TENSOR, nn_act_func_softmax),
        .input = nn_tensor_init_NNTensor(2, (const size_t[]){{1, {len(input)}}}, false, (const NNTensorUnit[]){{{input_c}}}, NULL),
        .expected_value = nn_tensor_init_NNTensor(2, (const size_t[]){{1, {len(input)}}}, false, (const NNTensorUnit[]){{{expected_value_c}}}, NULL),
        .expected_tolerance = default_expected_tolerance,
    }}"""

# Generate test cases
np.random.seed(2024)
test_cases = []
inputs = [
    np.array([-0.1, 0.2, 0.8, 0.0]),
    np.array([1.0, 0.5, -0.4, -1.0]),
    np.array([0.9, -0.3, 0.1, 0.0])
]
for input in inputs:
    test_cases.append(generate_test_case(input))

print(f"TestCase test_cases[] = {{{', '.join(test_cases)},\n}};")
@@ -1,4 +1,4 @@
-# This script generates test cases for NNLayer.
+# This script generates test cases for NNLayer struct.
 
 import numpy as np
 
@@ -1,4 +1,4 @@
-# This script generates test cases for NNLayer.
+# This script generates test cases for NNLayer struct.
 
 import numpy as np
 
4 changes: 0 additions & 4 deletions src/nn_activation.c
@@ -6,8 +6,6 @@
 #include <stdbool.h>
 #include <stddef.h>
 
-// TODO: Add tests
-
 NNActFunc nn_act_func_init(NNActFuncType type, void *func) {
     NN_DEBUG_PRINT(5, "function %s called with type=%d\n", __func__, type);
@@ -138,11 +136,9 @@ bool nn_act_func_tensor_batch(const NNActFuncTensor act_func, const NNTensor *in
     size_t sizes[1] = {sample_size};
     NNTensor input_slice;
     NNTensor output_slice;
-
     for (size_t i = 0; i < batch_size; i++) {
         nn_tensor_slice(input, i * sample_size, sizes, &input_slice);
         nn_tensor_slice(output, i * sample_size, sizes, &output_slice);
-
         if (!act_func(&input_slice, &output_slice, error)) {
             return false;
         }
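For context, the loop above walks a (batch_size x sample_size) tensor one sample at a time, each slice starting at offset i * sample_size. A generic, self-contained illustration of the same offset arithmetic with plain arrays (not the library's NNTensor type):

    #include <stddef.h>
    #include <stdio.h>

    int main(void) {
        const size_t batch_size = 3, sample_size = 4;
        float data[12];
        for (size_t k = 0; k < 12; k++) {
            data[k] = (float)k;
        }
        // Sample i occupies [i * sample_size, (i + 1) * sample_size) in the
        // row-major buffer; a slice is a view at that offset, not a copy.
        for (size_t i = 0; i < batch_size; i++) {
            const float *slice = &data[i * sample_size];
            printf("sample %zu starts at %.1f\n", i, slice[0]);
        }
        return 0;
    }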
3 changes: 3 additions & 0 deletions src/nn_layer.c
@@ -172,12 +172,15 @@ bool nn_layer_forward(const NNLayer *layer, const NNTensor *inputs, NNTensor *ou
         }
         // TODO: Should we overwrite the weights tensor so that we don't have to allocate a new tensor every time?
         if (!nn_mat_transpose(layer->weights, weights, error)) {
+            nn_tensor_destroy_NNTensor(weights);
             return false;
         }
         // Perform matrix multiplication
         if (!layer->mat_mul_func(inputs, weights, outputs, error)) {
+            nn_tensor_destroy_NNTensor(weights);
             return false;
         }
+        nn_tensor_destroy_NNTensor(weights);
     } else {
         // Perform matrix multiplication
         if (!layer->mat_mul_func(inputs, layer->weights, outputs, error)) {
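The three added nn_tensor_destroy_NNTensor calls free the transposed weights tensor on every exit path, closing a leak when nn_mat_transpose or mat_mul_func fails. A common alternative in C is a single cleanup label, sketched below against the same calls shown in the hunk; this is a pattern illustration, not a drop-in patch (the real function continues past this point on success):

    // Sketch: one destroy call behind a cleanup label instead of three.
    bool ok = false;
    if (!nn_mat_transpose(layer->weights, weights, error)) {
        goto cleanup;
    }
    if (!layer->mat_mul_func(inputs, weights, outputs, error)) {
        goto cleanup;
    }
    ok = true;
cleanup:
    nn_tensor_destroy_NNTensor(weights);
    if (!ok) {
        return false;
    }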
8 changes: 4 additions & 4 deletions tests/arch/arm/cmsis-dsp/dot_prod_perf/main.c
@@ -21,13 +21,13 @@ int main(int argc, char *argv[]) {
     struct timespec start, end;
     long long total_time = 0;
     const int batch_size = 1024;
-    const int n_vectors = 4096;
+    const int vector_size = 4096;
     NNTensor *vec_a[batch_size];
     NNTensor *vec_b[batch_size];
     for (int i = 0; i < batch_size; i++) {
-        vec_a[i] = nn_tensor_init_NNTensor(1, (const size_t[]){4096}, false, NULL, NULL);
-        vec_b[i] = nn_tensor_init_NNTensor(1, (const size_t[]){4096}, false, NULL, NULL);
-        for (int j = 0; j < n_vectors; ++j) {
+        vec_a[i] = nn_tensor_init_NNTensor(1, (const size_t[]){vector_size}, false, NULL, NULL);
+        vec_b[i] = nn_tensor_init_NNTensor(1, (const size_t[]){vector_size}, false, NULL, NULL);
+        for (int j = 0; j < vector_size; ++j) {
             vec_a[i]->data[j] = (NNTensorUnit)rand() / (NNTensorUnit)RAND_MAX;
             vec_b[i]->data[j] = (NNTensorUnit)rand() / (NNTensorUnit)RAND_MAX;
         }
8 changes: 4 additions & 4 deletions tests/arch/arm/neon/dot_prod_perf/main.c
@@ -21,13 +21,13 @@ int main(int argc, char *argv[]) {
     struct timespec start, end;
     long long total_time = 0;
     const int batch_size = 1024;
-    const int n_vectors = 4096;
+    const int vector_size = 4096;
     NNTensor *vec_a[batch_size];
     NNTensor *vec_b[batch_size];
     for (int i = 0; i < batch_size; i++) {
-        vec_a[i] = nn_tensor_init_NNTensor(1, (const size_t[]){4096}, false, NULL, NULL);
-        vec_b[i] = nn_tensor_init_NNTensor(1, (const size_t[]){4096}, false, NULL, NULL);
-        for (int j = 0; j < n_vectors; ++j) {
+        vec_a[i] = nn_tensor_init_NNTensor(1, (const size_t[]){vector_size}, false, NULL, NULL);
+        vec_b[i] = nn_tensor_init_NNTensor(1, (const size_t[]){vector_size}, false, NULL, NULL);
+        for (int j = 0; j < vector_size; ++j) {
             vec_a[i]->data[j] = (NNTensorUnit)rand() / (NNTensorUnit)RAND_MAX;
             vec_b[i]->data[j] = (NNTensorUnit)rand() / (NNTensorUnit)RAND_MAX;
         }
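Both perf tests time the batch with struct timespec, and the commit message's "Add _POSIX_C_SOURCE back" likely relates to them: clock_gettime and struct timespec come from POSIX, not ISO C, so compiling with -std=c99 or -std=c11 hides them unless the macro is defined before the first include. A minimal self-contained timing example follows (CLOCK_MONOTONIC is this sketch's choice; the repository's clock selection may differ):

    #define _POSIX_C_SOURCE 199309L // must precede the first #include
    #include <stdio.h>
    #include <time.h>

    int main(void) {
        struct timespec start, end;
        clock_gettime(CLOCK_MONOTONIC, &start);
        volatile double acc = 0.0; // stand-in workload
        for (int i = 0; i < 1000000; i++) {
            acc += (double)i;
        }
        clock_gettime(CLOCK_MONOTONIC, &end);
        // Combine the seconds and nanoseconds fields into elapsed nanoseconds.
        long long ns = (end.tv_sec - start.tv_sec) * 1000000000LL
                     + (end.tv_nsec - start.tv_nsec);
        printf("elapsed: %lld ns (acc=%.0f)\n", ns, acc);
        return 0;
    }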
8 changes: 8 additions & 0 deletions tests/arch/generic/activation/activation.h
@@ -0,0 +1,8 @@
void test_nn_act_func_init();
void test_nn_act_func();
void test_nn_act_func_identity();
void test_nn_act_func_sigmoid();
void test_nn_act_func_relu();
void test_nn_act_func_softmax();
void test_nn_act_func_scalar_batch();
void test_nn_act_func_tensor_batch();
15 changes: 15 additions & 0 deletions tests/arch/generic/activation/include.txt
@@ -0,0 +1,15 @@
tests/arch/generic/activation/nn_act_func_init.c
tests/arch/generic/activation/nn_act_func.c
tests/arch/generic/activation/nn_act_func_identity.c
tests/arch/generic/activation/nn_act_func_sigmoid.c
tests/arch/generic/activation/nn_act_func_relu.c
tests/arch/generic/activation/nn_act_func_softmax.c
tests/arch/generic/activation/nn_act_func_scalar_batch.c
tests/arch/generic/activation/nn_act_func_tensor_batch.c
src/nn_activation.c
src/nn_app.c
src/nn_argmax.c
src/nn_config.c
src/nn_error.c
src/nn_test.c
src/nn_tensor.c
18 changes: 18 additions & 0 deletions tests/arch/generic/activation/main.c
@@ -0,0 +1,18 @@
#include "./activation.h"
#include "nn_app.h"

int main(int argc, char *argv[]) {
    nn_init_app(argc, argv);
    // nn_set_debug_level(5); // for debugging

    test_nn_act_func_init();
    test_nn_act_func();
    test_nn_act_func_identity();
    test_nn_act_func_sigmoid();
    test_nn_act_func_relu();
    test_nn_act_func_softmax();
    test_nn_act_func_scalar_batch();
    test_nn_act_func_tensor_batch();

    return 0;
}
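Each of these tests compares floating-point outputs against an expected_value within expected_tolerance, per the generated cases above. As a self-contained illustration of that kind of check (a hypothetical helper, not one of the repository's assertion utilities):

    #include <math.h>
    #include <stdbool.h>
    #include <stddef.h>

    // Returns true if every element of actual is within tol of expected.
    static bool all_close(const float *actual, const float *expected,
                          size_t n, float tol) {
        for (size_t i = 0; i < n; i++) {
            if (fabsf(actual[i] - expected[i]) > tol) {
                return false;
            }
        }
        return true;
    }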