#include "../src/activation_function.hpp"
|
|
#include "../src/neural_net.hpp"
|
|
#include <cmath>
|
|
#include <gtest/gtest.h>
|
|
#include <stdexcept>
|
|
|
|
class NeuralNetTest : public ::testing::Test {
|
|
protected:
|
|
void SetUp() override {
|
|
// Create a simple neural network with 2 input neurons, 2 hidden neurons,
|
|
// and 2 output neurons
|
|
std::vector<size_t> layer_sizes = {2, 2, 2};
|
|
net = std::make_unique<NeuralNet<Sigmoid>>(layer_sizes);
|
|
}
|
|
|
|
std::unique_ptr<NeuralNet<Sigmoid>> net;
|
|
};

TEST_F(NeuralNetTest, FeedForward_SimpleNetwork) {
  // Test a simple network with known weights and inputs
  std::vector<float> input = {0.5f, 0.5f};

  // Set known weights for testing
  std::vector<Matrix<float>> weights = {
      Matrix<float>(2, 2, 0.5f), // First layer weights
      Matrix<float>(2, 2, 0.5f)  // Output layer weights
  };

  // Replace the network's weights with our test weights
  net->set_weights(weights);

  // Calculate expected output manually.
  // First layer: Z1 = W1 * X
  Matrix<float> X(2, 1, 0.0f);
  X(0, 0) = input[0];
  X(1, 0) = input[1];

  Matrix<float> Z1 = weights[0] * X;
  // Apply sigmoid activation to the hidden layer
  Sigmoid::apply(Z1.data());

  // Second layer: Z2 = W2 * A1
  Matrix<float> Z2 = weights[1] * Z1;
  SoftMax::apply(Z2.data());

  // Convert the resulting column vector to an output vector,
  // one entry per row of Z2
  std::vector<float> expected_output(Z2.rows());
  for (size_t i = 0; i < Z2.rows(); i++) {
    expected_output[i] = Z2(i, 0);
  }
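
  // Worked numbers for the manual calculation above (a sketch of the
  // arithmetic, assuming the column-vector layout used here):
  //   Z1 = W1 * X  -> each row: 0.5*0.5 + 0.5*0.5 = 0.5
  //   A1 = sigmoid(0.5) ~ 0.6225 per entry
  //   Z2 = W2 * A1 -> each row: 0.5*0.6225 + 0.5*0.6225 ~ 0.6225
  //   softmax of a uniform vector is uniform, so each output is 0.5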

  // Get actual output from feed_forward
  std::vector<float> output = net->feed_forward(input);

  // Compare actual and expected outputs
  for (size_t i = 0; i < output.size(); i++) {
    EXPECT_NEAR(output[i], expected_output[i], 1e-6);
  }
}

TEST_F(NeuralNetTest, FeedForward_DifferentLayerSizes) {
  // Create a network with different layer sizes
  std::vector<size_t> layer_sizes = {3, 4, 2};
  NeuralNet<Sigmoid> net2(layer_sizes);

  std::vector<float> input = {0.1f, 0.2f, 0.3f};
  std::vector<float> output = net2.feed_forward(input);

  // Output should have 2 elements (size of last layer)
  EXPECT_EQ(output.size(), 2);
}
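
// A small companion sketch (assuming the output layer applies SoftMax, as
// FeedForward_SoftmaxOutput below verifies for the fixture network): the
// differently shaped {3, 4, 2} network's output should likewise sum to 1.
TEST_F(NeuralNetTest, FeedForward_DifferentLayerSizes_OutputSumsToOne) {
  std::vector<size_t> layer_sizes = {3, 4, 2};
  NeuralNet<Sigmoid> net2(layer_sizes);

  std::vector<float> output = net2.feed_forward({0.1f, 0.2f, 0.3f});

  float sum = 0.0f;
  for (float val : output) {
    sum += val;
  }
  EXPECT_NEAR(sum, 1.0f, 1e-6);
}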

TEST_F(NeuralNetTest, FeedForward_InvalidInputSize) {
  std::vector<float> input = {0.1f}; // Only 1 input, but network expects 2

  // This should throw an exception since the input size doesn't match the
  // first layer size
  EXPECT_THROW(net->feed_forward(input), std::invalid_argument);
}
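
// A hypothetical companion check, assuming feed_forward rejects any size
// mismatch (as the comment above suggests) rather than only undersized
// inputs:
TEST_F(NeuralNetTest, FeedForward_OversizedInputSize) {
  std::vector<float> input = {0.1f, 0.2f, 0.3f}; // 3 inputs, network expects 2
  EXPECT_THROW(net->feed_forward(input), std::invalid_argument);
}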

TEST_F(NeuralNetTest, FeedForward_IdentityTest) {
  // Create a single-layer network with all-ones weights and no bias
  std::vector<size_t> layer_sizes = {2, 2};
  NeuralNet<Sigmoid> net2(layer_sizes);

  // Set every weight to 1.0 (a constant matrix, not an identity matrix)
  std::vector<Matrix<float>> weights = {Matrix<float>(2, 2, 1.0f)};

  net2.set_weights(weights);

  std::vector<float> input = {0.5f, 0.5f};
  std::vector<float> output = net2.feed_forward(input);

  // With all-ones weights, each pre-activation is 0.5 + 0.5 = 1.0, and the
  // output layer applies softmax. Softmax of a uniform vector is uniform, so
  // softmax([1.0, 1.0]) equals softmax([0.5, 0.5]) and we can compute the
  // expected value directly from the (also uniform) input.
  std::vector<float> expected_output = input;
  SoftMax::apply(expected_output);

  for (float val : output) {
    EXPECT_NEAR(val, expected_output[0], 1e-6);
  }
}
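
// A companion sketch using a true identity matrix, built element-wise with
// the Matrix accessor used above (assuming set_weights accepts it the same
// way and the network has no bias): with W = I the output should be exactly
// softmax(input), even for a non-uniform input.
TEST_F(NeuralNetTest, FeedForward_TrueIdentityWeights) {
  std::vector<size_t> layer_sizes = {2, 2};
  NeuralNet<Sigmoid> net2(layer_sizes);

  Matrix<float> eye(2, 2, 0.0f);
  eye(0, 0) = 1.0f;
  eye(1, 1) = 1.0f;
  std::vector<Matrix<float>> weights = {eye};
  net2.set_weights(weights);

  std::vector<float> input = {0.2f, 0.8f};
  std::vector<float> expected_output = input;
  SoftMax::apply(expected_output); // softmax(I * x) = softmax(x)

  std::vector<float> output = net2.feed_forward(input);
  for (size_t i = 0; i < output.size(); i++) {
    EXPECT_NEAR(output[i], expected_output[i], 1e-6);
  }
}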

TEST_F(NeuralNetTest, FeedForward_SoftmaxOutput) {
  std::vector<float> input = {1.0f, -1.0f};
  std::vector<float> output = net->feed_forward(input);

  // Verify that the output sums to 1 (property of softmax)
  float sum = 0.0f;
  for (float val : output) {
    sum += val;
  }
  EXPECT_NEAR(sum, 1.0f, 1e-6);

  // Verify that all outputs are positive
  for (float val : output) {
    EXPECT_GT(val, 0.0f);
  }
}
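
// A further property sketch, reusing the constant weights from
// FeedForward_SimpleNetwork: every row of a constant weight matrix is
// identical, so all pre-activations within a layer are equal and the softmax
// output is uniform (0.5 each) regardless of the input.
TEST_F(NeuralNetTest, FeedForward_ConstantWeightsGiveUniformOutput) {
  std::vector<Matrix<float>> weights = {
      Matrix<float>(2, 2, 0.5f),
      Matrix<float>(2, 2, 0.5f)
  };
  net->set_weights(weights);

  // Any input should map to the uniform distribution
  std::vector<float> output = net->feed_forward({0.9f, -0.3f});
  for (float val : output) {
    EXPECT_NEAR(val, 0.5f, 1e-6);
  }
}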