#ifndef AI_H
#define AI_H

// Minimal fixed-size matrix + 3-layer feed-forward neural network intended
// for genetic-algorithm training (mutate/crossover), no backpropagation.
// Randomness comes from rand(); call srand() once at startup for
// reproducible runs.  NOTE(review): the original header's include list and
// template parameter lists were stripped by an extraction pass; they are
// reconstructed here from usage (data[w][h], rows()==w, the dot/addBias
// pipeline in network::output) — confirm against the upstream project.

#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <vector>

// Uniform random value in [-1, 1].
#define randomnf(Type) (static_cast<Type>(rand()) / static_cast<Type>(RAND_MAX / 2) - 1)
// Uniform random value in [0, 1].
#define randomf(Type) (static_cast<Type>(rand()) / static_cast<Type>(RAND_MAX))
// Uniform random value in [0, h].
#define randomfh(Type, h) (static_cast<Type>(rand()) / static_cast<Type>(RAND_MAX) * (h))
// Element count of a true C array (do not use on pointers).
#define sizeofArr(a) (sizeof(a) / sizeof((a)[0]))
// Logistic sigmoid 1 / (1 + e^-x).  The argument is parenthesized so that
// expressions like sigmoid(a - b) expand correctly; std::exp replaces
// std::pow(M_E, ...) (M_E is non-standard without _USE_MATH_DEFINES).
#define sigmoid(x) (1 / (1 + std::exp(-(x))))

namespace ai {

    // Fixed-size w x h matrix (w rows, h columns) of arithmetic type T.
    // Storage is a plain 2-D array, so the type is trivially copyable and
    // all dimensions are checked at compile time.
    template <typename T, size_t w, size_t h>
    struct matrix {
        T data[w][h];

        static constexpr size_t rows() { return w; }
        static constexpr size_t cols() { return h; }

        // Fill every element with a uniform random value in [-1, 1].
        void randomize() {
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    data[i][j] = randomnf(T);
        }

        // Element-wise scalar addition; returns a new matrix.
        // (Fixes the original, which returned an uninitialized matrix.)
        template <typename mulType>
        matrix operator+(mulType v) const {
            matrix ret;
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    ret.data[i][j] = data[i][j] + static_cast<T>(v);
            return ret;
        }

        // Element-wise matrix addition; dimensions are enforced by the type.
        matrix operator+(const matrix& n) const {
            matrix ret;
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    ret.data[i][j] = data[i][j] + n.data[i][j];
            return ret;
        }

        // In-place element-wise scalar addition.
        template <typename mulType>
        void operator+=(mulType v) {
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    data[i][j] += static_cast<T>(v);
        }

        // Element-wise scalar subtraction; returns a new matrix.
        template <typename mulType>
        matrix operator-(mulType v) const {
            matrix ret;
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    ret.data[i][j] = data[i][j] - static_cast<T>(v);
            return ret;
        }

        // Element-wise matrix subtraction.
        matrix operator-(const matrix& n) const {
            matrix ret;
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    ret.data[i][j] = data[i][j] - n.data[i][j];
            return ret;
        }

        // In-place element-wise scalar subtraction.
        template <typename mulType>
        void operator-=(mulType v) {
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    data[i][j] -= static_cast<T>(v);
        }

        // Element-wise scalar multiplication; returns a new matrix.
        template <typename mulType>
        matrix operator*(mulType v) const {
            matrix ret;
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    ret.data[i][j] = data[i][j] * static_cast<T>(v);
            return ret;
        }

        // In-place element-wise scalar multiplication.
        template <typename mulType>
        void operator*=(mulType v) {
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    data[i][j] *= static_cast<T>(v);
        }

        // Element-wise (Hadamard) product — NOT matrix multiplication;
        // use dot() for the linear-algebra product.
        matrix operator*(const matrix& n) const {
            matrix ret;
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    ret.data[i][j] = data[i][j] * n.data[i][j];
            return ret;
        }

        // Matrix product: (w x h) . (h x h2) -> (w x h2).
        // The inner dimension match is enforced by the parameter type.
        // \param n The right-hand operand.
        template <size_t h2>
        matrix<T, w, h2> dot(const matrix<T, h, h2>& n) const {
            matrix<T, w, h2> result;
            for (size_t i = 0; i < w; i++) {
                for (size_t j = 0; j < h2; j++) {
                    T sum = 0;
                    for (size_t k = 0; k < h; k++)
                        sum += data[i][k] * n.data[k][j];
                    result.data[i][j] = sum;
                }
            }
            return result;
        }

        // Returns the transpose (h x w) of this matrix.
        matrix<T, h, w> transpose() const {
            matrix<T, h, w> result;
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    result.data[j][i] = data[i][j];
            return result;
        }

        // Build a w x 1 column matrix from the first min(arr.size(), w)
        // elements of arr; remaining rows are zero.  Independent of *this
        // (kept as a member for source compatibility with old callers).
        matrix<T, w, 1> singleColumnMatrixFromArray(const std::vector<T>& arr) const {
            matrix<T, w, 1> n{};
            const size_t count = arr.size() < w ? arr.size() : w;
            for (size_t i = 0; i < count; i++)
                n.data[i][0] = arr[i];
            return n;
        }

        // Copy arr into this matrix in row-major order.  Copies at most
        // w*h elements (the original memcpy'd arr.size() elements and could
        // overflow data when arr was larger than the matrix).
        void fromArray(const std::vector<T>& arr) {
            const size_t count = arr.size() < w * h ? arr.size() : w * h;
            std::memcpy(data, arr.data(), count * sizeof(T));
        }

        // Flatten the matrix into a w*h vector in row-major order.
        std::vector<T> toArray() const {
            std::vector<T> ret(w * h);
            std::memcpy(ret.data(), data, w * h * sizeof(T));
            return ret;
        }

        // For column matrices: append a constant 1 row at the bottom
        // (the bias input), producing a (w+1) x 1 matrix.
        matrix<T, w + 1, 1> addBias() const {
            static_assert(h == 1, "addBias only applies to column matrices");
            matrix<T, w + 1, 1> n;
            for (size_t i = 0; i < w; i++)
                n.data[i][0] = data[i][0];
            n.data[w][0] = 1;
            return n;
        }

        // Apply the sigmoid activation to each element.
        matrix activate() const {
            matrix n;
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    n.data[i][j] = sigmoid(data[i][j]);
            return n;
        }

        // Derivative of the sigmoid, assuming this matrix already holds
        // sigmoid outputs: s' = s * (1 - s).
        matrix sigmoidDerived() const {
            matrix n;
            for (size_t i = 0; i < w; i++)
                for (size_t j = 0; j < h; j++)
                    n.data[i][j] = data[i][j] * (1 - data[i][j]);
            return n;
        }

        // Returns this matrix with the bottom row (the bias row) removed.
        matrix<T, w - 1, h> removeBottomLayer() const {
            static_assert(w > 1, "removeBottomLayer needs at least two rows");
            matrix<T, w - 1, h> n;
            for (size_t i = 0; i < w - 1; i++)
                for (size_t j = 0; j < h; j++)
                    n.data[i][j] = data[i][j];
            return n;
        }

        // Genetic-algorithm mutation: with probability mutationRate each
        // element is perturbed by a small uniform value and clamped to
        // [-1, 1].  (The original had the perturbation commented out, so
        // mutate() never changed in-range weights; a Gaussian perturbation
        // was the stated TODO — uniform is used here as rand() provides it.)
        void mutate(T mutationRate) {
            for (size_t i = 0; i < w; i++) {
                for (size_t j = 0; j < h; j++) {
                    if (randomf(T) < mutationRate) {
                        data[i][j] += randomnf(T) / 5;
                        if (data[i][j] > 1)
                            data[i][j] = 1;
                        if (data[i][j] < -1)
                            data[i][j] = -1;
                    }
                }
            }
        }

        // Genetic-algorithm crossover: pick a random split point; elements
        // before it (row-major) come from this matrix, the rest from partner.
        matrix crossover(const matrix& partner) const {
            matrix child;
            const size_t randC = static_cast<size_t>(std::floor(randomfh(T, static_cast<T>(h))));
            const size_t randR = static_cast<size_t>(std::floor(randomfh(T, static_cast<T>(w))));
            for (size_t i = 0; i < w; i++) {
                for (size_t j = 0; j < h; j++) {
                    if (i < randR || (i == randR && j <= randC))
                        child.data[i][j] = data[i][j];
                    else
                        child.data[i][j] = partner.data[i][j];
                }
            }
            return child;
        }

        // Return a copy of this matrix (trivially copyable, so a plain
        // value copy suffices; kept for API compatibility).
        matrix clone() const { return *this; }
    };

    // Feed-forward network: iNodes inputs -> hNodes hidden -> hNodes hidden
    // -> oNodes outputs, sigmoid activations, one bias unit per layer
    // (hence the +1 columns on every weight matrix).
    template <typename T, size_t iNodes, size_t hNodes, size_t oNodes>
    struct network {
        matrix<T, hNodes, iNodes + 1> whi; // input -> hidden-1 weights (+bias column)
        matrix<T, hNodes, hNodes + 1> whh; // hidden-1 -> hidden-2 weights (+bias column)
        matrix<T, oNodes, hNodes + 1> woh; // hidden-2 -> output weights (+bias column)

        // Start with all weights uniform random in [-1, 1].
        network() {
            whi.randomize();
            whh.randomize();
            woh.randomize();
        }

        // Mutate every weight layer with the given per-element rate.
        void mutate(T mr) {
            whi.mutate(mr);
            whh.mutate(mr);
            woh.mutate(mr);
        }

        // Forward pass: returns the oNodes output activations for the given
        // inputs (extra input elements beyond iNodes are ignored, missing
        // ones are treated as 0).
        std::vector<T> output(const std::vector<T>& inputsArr) const {
            // Build the input column directly (the original routed this
            // through woh.singleColumnMatrixFromArray, which only worked
            // because that helper ignored *this).
            matrix<T, iNodes, 1> inputs{};
            inputs.fromArray(inputsArr);
            auto inputsBias = inputs.addBias();

            // Layer 1: weights, sigmoid, bias.
            auto hiddenOutputsBias = whi.dot(inputsBias).activate().addBias();
            // Layer 2: weights, sigmoid, bias.
            auto hiddenOutputsBias2 = whh.dot(hiddenOutputsBias).activate().addBias();
            // Output layer: weights, sigmoid.
            auto outputs = woh.dot(hiddenOutputsBias2).activate();

            return outputs.toArray();
        }

        // Genetic-algorithm crossover: child layers are crossovers of the
        // corresponding parent layers.
        network crossover(const network& partner) const {
            network child;
            child.whi = whi.crossover(partner.whi);
            child.whh = whh.crossover(partner.whh);
            child.woh = woh.crossover(partner.woh);
            return child;
        }

        // Return a copy of this network.
        network clone() const {
            network copy;
            copy.whi = whi.clone();
            copy.whh = whh.clone();
            copy.woh = woh.clone();
            return copy;
        }
    };

} // namespace ai

#endif // AI_H