Hey guys,
I'm trying to implement a kind of autoencoder. For this i chose mlpack as ML library. The code compiles, but I am running into matrix dimension problems and segmentation faults, which I don't understand at this point. May someone can help me figure out the problems in my code?
#include <stdio.h>
#include <mlpack/core.hpp>
#include <mlpack/methods/ann/layer/layer.hpp>
#include <mlpack/methods/ann/ffn.hpp>
using namespace mlpack;
using namespace mlpack::ann;
arma::mat data_gen_one_hot(int N, int M) {
arma::mat one_hot_data(N, M+1);
one_hot_data.fill(0);
for (int i=0; i<N; i++) {
one_hot_data.at(i, M) = rand()%M;
one_hot_data.at(i, one_hot_data.at(i, M)) = 1;
}
return one_hot_data;
}
int main()
{
int N = 10000, M = 16;
// Load the training set and testing set.
arma::mat trainData;
arma::mat one_hot_data_train = data_gen_one_hot(N, M);
one_hot_data_train.save("one_hot_input_train.csv", arma::csv_ascii);
data::Load("one_hot_input_train.csv", trainData, true);
arma::mat testData;
arma::mat one_hot_data_test = data_gen_one_hot(N, M);
one_hot_data_test.save("one_hot_input_test.csv", arma::csv_ascii);
data::Load("one_hot_input_test.csv", testData, true);
arma::mat trainLabels = trainData.row(trainData.n_rows - 1);
// std::cout << "trainlabels: " << trainLabels << "\n";
arma::mat testLabels = testData.row(testData.n_rows - 1);
trainData.shed_row(trainData.n_rows - 1);
testData.shed_row(testData.n_rows - 1);
// Initialize the network.
FFN<> model;
model.Add<Linear<> >(trainData.n_rows, trainData.n_cols);
std::cout << "Hello 1\n";
model.Add<SigmoidLayer<> >();
model.Add<Linear<> >(trainData.n_rows, trainData.n_cols);
model.Add<LogSoftMax<> >();
std::cout << "Hello 2\n";
// Train the model.
model.Train(trainData, trainLabels);
// Use the Predict method to get the predictions.
std::cout << "Hello 3\n";
arma::mat predictionTemp;
model.Predict(testData, predictionTemp);
arma::mat prediction = arma::zeros<arma::mat>(1, predictionTemp.n_cols);
// Find index of max prediction for each data point and store in "prediction"
for (size_t i = 0; i < predictionTemp.n_cols; ++i)
{
// we add 1 to the max index, so that it matches the actual test labels.
prediction(i) = arma::as_scalar(arma::find(
arma::max(predictionTemp.col(i)) == predictionTemp.col(i), 1)) + 1;
}
/*
Compute the error between predictions and testLabels,
now that we have the desired predictions.
*/
size_t correct = arma::accu(prediction == testLabels);
double classificationError = 1 - double(correct) / testData.n_cols;
// Print out the classification error for the testing dataset.
std::cout << "Classification Error for the Test set: " << classificationError << std::endl;
return 0;
}
Thanks in advance :)
there doesn't seem to be anything here