AlgoPlus v0.1.0
perceptron.h
#pragma once

#ifdef __cplusplus
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>
#include <functional>
#include "../activation/activation_functions.h"
#include "../metrics/metrics.h"
#include "../../algorithms/math/multiply.h"
#include "nn.h"
#endif

/**
 * @brief perceptron class
 */
class perceptron {
private:
    std::vector<std::vector<double>> data_;
    std::vector<double> labels_;
    int epochs_;
    int num_classes_;
    double learning_rate_;
    nn::Linear weights_;

public:
    /**
     * @brief default constructor for perceptron class
     * @param data the input data; the last element of each row is its label
     * @param num_classes the number of classes
     * @param epochs the number of training epochs (default 100)
     * @param learning_rate the learning rate (default 0.001)
     */
    explicit perceptron(std::vector<std::vector<double>> const&,
                        const int num_classes,
                        const int epochs = 100,
                        const double learning_rate = 0.001);

    /**
     * @brief fit a single perceptron on the input data
     * @param batch_size the mini-batch size used for each weight update
     */
    void fit(const int batch_size);

    /**
     * @brief performs inference on a feature vector
     * @return 1 if the linear activation is positive, 0 otherwise
     */
    double predict(std::vector<double> const&);
};

inline perceptron::perceptron(std::vector<std::vector<double>> const& data,
                              const int num_classes,
                              const int epochs,
                              const double learning_rate)
    : weights_(data[0].size() - 1, 1, true) {
    assert(data.size() > 0);
    assert(epochs > 0);
    assert(learning_rate > 0);
    assert(num_classes >= 1);
    this->num_classes_ = num_classes;
    this->epochs_ = epochs;
    this->data_ = data;
    this->learning_rate_ = learning_rate;
    // split each row into features and label (the label is stored last)
    for (std::vector<double>& row : this->data_) {
        this->labels_.push_back(row.back());
        row.pop_back();
    }
}

inline void perceptron::fit(const int batch_size) {
    for (int epoch = 0; epoch < this->epochs_; epoch++) {
        std::vector<double> y_preds;
        int curr_batch = 0;
        int s_batch = 0;
        int e_batch = std::min(batch_size, int(this->data_.size()));
        double avg_error = 0.0;
        std::vector<double> batch_err;
        std::vector<std::vector<double>> batch_inputs;
        // iterate over the dataset in mini-batches of batch_size samples
        while (s_batch < int(this->data_.size())) {
            batch_err.clear();
            batch_inputs.clear();
            avg_error = 0.0;

            // forward pass: threshold the linear output at 0 and record the error
            for (int i = s_batch; i < e_batch; i++) {
                double y_pred_ = (this->weights_.forward(this->data_[i])[0] > 0);
                double err_ = y_pred_ - this->labels_[i];
                batch_err.push_back(err_);
                avg_error += err_;
                batch_inputs.push_back(this->data_[i]);
                y_preds.push_back(y_pred_);
            }
            avg_error /= (e_batch - s_batch);

            // average the per-sample gradients (error * input) over the batch
            std::vector<double> avg_gradients(this->data_[0].size(), 0.0);
            for (int sample = 0; sample < (e_batch - s_batch); sample++) {
                for (int features = 0; features < int(batch_inputs[0].size()); features++) {
                    avg_gradients[features] += batch_err[sample] * batch_inputs[sample][features];
                }
            }
            for (int features = 0; features < int(avg_gradients.size()); features++) {
                avg_gradients[features] /= (e_batch - s_batch);
            }
            this->weights_.update_weights(avg_gradients, avg_error, this->learning_rate_);

            curr_batch++;
            s_batch = curr_batch * batch_size;
            e_batch = std::min((curr_batch + 1) * batch_size, int(this->data_.size()));
        }

        std::cout << "Epoch: " << epoch + 1 << ": "
                  << "Accuracy: " << metrics::accuracy_score(this->labels_, y_preds)
                  << " | f1_score: " << metrics::f1_score(this->labels_, y_preds)
                  << " | Recall: " << metrics::recall(this->labels_, y_preds)
                  << " | Precision: " << metrics::precision(this->labels_, y_preds) << '\n';
    }
}

inline double perceptron::predict(std::vector<double> const& input) {
    assert(input.size() == this->data_[0].size());
    // returns 1 if the linear activation is positive, 0 otherwise
    return (this->weights_.forward(input)[0] > 0);
}
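
A minimal usage sketch (not part of the header): it assumes the AlgoPlus headers above are on the include path, that labels are binary 0/1 values stored as the last element of each training row (which is how the constructor splits features from labels), and the AND-gate data and hyperparameters below are illustrative only.

// example.cpp (illustrative sketch, not part of AlgoPlus): train on an AND gate
#include <iostream>
#include <vector>
#include "perceptron.h"

int main() {
    // each row is {x1, x2, label}; the constructor pops the last element as the label
    std::vector<std::vector<double>> data = {
        {0.0, 0.0, 0.0},
        {0.0, 1.0, 0.0},
        {1.0, 0.0, 0.0},
        {1.0, 1.0, 1.0},
    };

    // 2 classes; 50 epochs and a 0.01 learning rate are assumed, tunable values
    perceptron clf(data, 2, 50, 0.01);

    // train with mini-batches of 2 samples; prints per-epoch metrics to stdout
    clf.fit(2);

    // predict takes a feature vector without the label
    std::cout << clf.predict({1.0, 1.0}) << '\n';
    return 0;
}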
Referenced symbols
nn::Linear: Linear module; this implementation mostly follows PyTorch's implementation. Defined in nn.h.
perceptron::perceptron(std::vector<std::vector<double>> const&, const int num_classes, const int epochs = 100, const double learning_rate = 0.001): default constructor for the perceptron class.
perceptron::fit(const int batch_size): fit a single perceptron on the input data.
perceptron::predict(std::vector<double> const&): performs inference, classifying the input to 1 or 0.
metrics::f1_score(const std::vector<double>& y, const std::vector<double>& y_pred): f1 score, 2 * precision * recall / (precision + recall). Defined in metrics.h.
metrics::accuracy_score(const std::vector<double>& y, const std::vector<double>& y_pred): accuracy score, (tp + tn) / (tp + tn + fp + fn). Defined in metrics.h.
metrics::recall(const std::vector<double>& y, const std::vector<double>& y_pred): recall, tp / (tp + fn). Defined in metrics.h.
metrics::precision(const std::vector<double>& y, const std::vector<double>& y_pred): precision, tp / (tp + fp). Defined in metrics.h.
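
As a reading aid only, here is a compact sketch of the listed formulas for binary 0/1 labels; it is not the metrics.h implementation, and it assumes every denominator is non-zero.

// sketch of the listed metrics for binary 0/1 labels (not the metrics.h code);
// denominators are assumed non-zero for brevity
#include <cstddef>
#include <vector>

struct binary_metrics_t {
    double accuracy, precision, recall, f1;
};

inline binary_metrics_t binary_metrics(const std::vector<double>& y,
                                       const std::vector<double>& y_pred) {
    double tp = 0, tn = 0, fp = 0, fn = 0;
    for (std::size_t i = 0; i < y.size(); i++) {
        if (y_pred[i] == 1 && y[i] == 1) tp++;        // true positive
        else if (y_pred[i] == 0 && y[i] == 0) tn++;   // true negative
        else if (y_pred[i] == 1 && y[i] == 0) fp++;   // false positive
        else fn++;                                    // false negative
    }
    binary_metrics_t m;
    m.accuracy  = (tp + tn) / (tp + tn + fp + fn);
    m.precision = tp / (tp + fp);
    m.recall    = tp / (tp + fn);
    m.f1        = 2 * m.precision * m.recall / (m.precision + m.recall);
    return m;
}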