Fix feed-forward implementation and tests

This commit is contained in:
Alex Selimov 2025-03-29 12:52:35 -04:00
parent 59d27f47f6
commit 7cefc9baf1
2 changed files with 20 additions and 19 deletions

View File

@ -16,14 +16,13 @@ std::vector<float> feed_layer(std::vector<float>::iterator weight_start,
// Calculate the new A vector from the current weights
std::vector<float> Anew;
Anew.reserve(std::distance(weight_start, weight_end));
std::transform(weight_start, weight_end, Anew.begin(),
[&A, &activation_func](float weight) {
float summed_weight = std::accumulate(
A.begin(), A.end(), 0.0f, [&weight](float acc, float a) {
return acc + a * weight;
});
return summed_weight;
});
std::transform(
weight_start, weight_end, std::back_inserter(Anew), [&A](float weight) {
float summed_weight = std::accumulate(
A.begin(), A.end(), 0.0f,
[&weight](float acc, float a) { return acc + a * weight; });
return summed_weight;
});
activation_func(Anew);
return Anew;
};

View File

@ -5,8 +5,11 @@
// Simple identity activation function for testing
// Simple identity activation function for testing.
// Matches the ActivationFunc concept used by Utilities::feed_layer:
// a callable taking the layer output vector by reference and mutating
// it in place.
struct Identity {
  void operator()(std::vector<float> &x) const {
    // Identity function - no change to values.
    // The cast below only suppresses the unused-parameter compiler
    // warning; it has no runtime effect.
    (void)x;
  }
};
@ -14,20 +17,19 @@ TEST(UtilityTest, FeedLayerIdentityTest) {
// Test with identity activation function for simple verification
// Input: [1, 2]
// Weights: [0.5, -0.5, 1.0, -1.0]
// Expected: [0.5, -1.0] (manually calculated)
// First output: 1.0 * 0.5 + 2.0 * -0.5 = 0.5
// Second output: 1.0 * 1.0 + 2.0 * -1.0 = -1.0
std::vector<float> weights = {0.5, -0.5, 1.0, -1.0};
std::vector<float> input = {1.0, 2.0};
Identity identity;
auto output = Utilities::feed_layer<Identity>(weights.begin(), weights.end(),
input, identity);
input, identity);
ASSERT_EQ(output.size(), 2);
EXPECT_NEAR(output[0], 0.5f, 1e-5); // 1.0 * 0.5 + 2.0 * -0.5
EXPECT_NEAR(output[1], -1.0f, 1e-5); // 1.0 * 1.0 + 2.0 * -1.0
ASSERT_EQ(output.size(), 4);
EXPECT_NEAR(output[0], 1.5f, 1e-5); // 1.0 * 0.5 + 2.0 * 0.5
EXPECT_NEAR(output[1], -1.5f, 1e-5); // 1.0 * -0.5 + 2.0 * -0.5
EXPECT_NEAR(output[2], 3.0f, 1e-5); // 1.0 * 1.0 + 2.0 * 1.0
EXPECT_NEAR(output[3], -3.0f, 1e-5); // 1.0 * -1.0 + 2.0 * -1.0
}
TEST(UtilityTest, FeedLayerSigmoidTest) {
@ -39,7 +41,7 @@ TEST(UtilityTest, FeedLayerSigmoidTest) {
Sigmoid sigmoid;
auto output = Utilities::feed_layer<Sigmoid>(weights.begin(), weights.end(),
input, sigmoid);
input, sigmoid);
ASSERT_EQ(output.size(), 2);
// Note: Sigmoid is applied to the whole vector after matrix multiplication
@ -58,7 +60,7 @@ TEST(UtilityTest, FeedLayerSoftMaxTest) {
SoftMax softmax;
auto output = Utilities::feed_layer<SoftMax>(weights.begin(), weights.end(),
input, softmax);
input, softmax);
ASSERT_EQ(output.size(), 2);
// Both outputs should be 0.5 since inputs to softmax are equal (both 2.0)
@ -72,7 +74,7 @@ TEST(UtilityTest, FeedLayerEmptyInput) {
Identity identity;
auto output = Utilities::feed_layer<Identity>(weights.begin(), weights.end(),
input, identity);
input, identity);
ASSERT_EQ(output.size(), 2);
EXPECT_NEAR(output[0], 0.0f, 1e-5);