#ifndef BACKPROPAGATION_NEURAL_NETWORK_H
#define BACKPROPAGATION_NEURAL_NETWORK_H
// NOTE(review): the original guard macro (__Backpropagation_Neural_Network_Defined__)
// began with a double underscore, which is reserved for the implementation;
// renamed to a conforming identifier.

#include <string>
// NOTE(review): the original had a bare `#include` with no header name (which
// does not preprocess) followed by `using namespace std;` at header scope.
// The missing header is <string> (needed by Give_Network_Info_String); the
// using-directive has been removed and std:: qualification used instead.  If
// any existing .cpp relied on the directive leaking from this header, add
// `using namespace std;` to that .cpp instead.

// Euler's number to full double precision.  Kept as a macro for source
// compatibility with the existing implementation file; a modern alternative
// would be `inline constexpr double`.
#define NUMBER_E 2.718281828459045235360287471352662497757247093699959574966

// Activation function and its derivative, defined in the implementation file.
// Presumably a sigmoid-style squashing function -- confirm against the .cpp.
double Activation_Function(double);
double Derivative_Activation_Function(double);

// Random-number helper defined in the implementation file; the parameter
// presumably bounds the range of the result -- confirm against the .cpp.
double Get_Rand_In_Range(double);

// Addresses a single neuron by layer index and position within that layer.
struct Neuron_Position
{
    int Layer;
    int Neuron;
};

// One processing unit of the network.  All state is private and manipulated
// only by Backpropagation_Neural_Network (declared a friend); the public
// interface is construction/destruction only.
class Neuron
{
    friend class Backpropagation_Neural_Network;
private:
    void Set_Default_Values();
    double Total_Input;            // weighted sum of inputs for the current forward pass
    double Output;                 // activation applied to Total_Input
    double Bias;
    double Bias_Adjustment;        // pending bias delta, applied during the update step
    double Error_Signal;           // backpropagated error term
    int Num_Of_Posterior_Neurons;  // element count of the posterior-side arrays below
    int Num_Of_Anterior_Neurons;   // element count of pAnterior_Neuron
    double * pWeight;              // connection weights (presumably owned; verify dtor in .cpp)
    double * pWeight_Adjustment;   // pending weight deltas, one per weight
    Neuron ** pPosterior_Neuron;   // links to next-layer neurons (presumably non-owning)
    Neuron ** pAnterior_Neuron;    // links to previous-layer neurons (presumably non-owning)
public:
    Neuron();
    ~Neuron();
};

// A single layer: an array of Num_Of_Neurons Neuron objects.
class Layer
{
    friend class Backpropagation_Neural_Network;
private:
    void Set_Default_Values();
    Neuron * pNeuron;     // neurons of this layer (presumably owned; verify dtor in .cpp)
    int Num_Of_Neurons;
public:
    Layer();
    ~Layer();
};

// Feed-forward network trained by backpropagation.  Call Init() once to build
// the topology, then Train_Once() repeatedly with normalized (-1..1)
// input/target vectors.  See the usage notes at the bottom of this header.
class Backpropagation_Neural_Network
{
private:
    void Set_Default_Values();
    void Assign_Posterior_Neurons();   // wire each neuron to the following layer
    void Assign_Anterior_Neurons();    // wire each neuron to the preceding layer
    void Assign_Weights();             // initialize connection weights
    double Learning_Rate;
    double Momentum;                   // declared but possibly not yet used -- see note below
    double Mean_Squared_Error;         // presumably the error of the latest pass -- verify in .cpp
    void Forward_Propagate(double *);  // compute outputs from an input vector
    void Back_Propagate(double *);     // compute error signals from a target vector
    void Apply_Weight_Adjustments();   // commit accumulated weight/bias deltas
    int Num_Of_Layers;
    Layer * pLayer;                    // array of Num_Of_Layers layers (presumably owned)
public:
    // Init(Layout, Rate): Layout[0] is the layer count and Layout[1..] the
    // neuron count of each layer; Rate is the learning rate.
    void Init(int *, double);
    // One training step on an (input, target) pair of normalized vectors.
    void Train_Once(double *, double *);
    // Human-readable description of the network.
    std::string Give_Network_Info_String();
    Backpropagation_Neural_Network();
    ~Backpropagation_Neural_Network();
};

/* ***************************************************************************
 * HOW TO USE THIS
 *
 * From the point of view of abstraction, all you need to know is that this is
 * a backpropagation neural network: you present it with an array of numbers
 * normalized to the range -1 to 1 that describes the input, or information
 * about the "situation".  It then tries to figure out what it is supposed to
 * output based on that input.  You then tell it what it was supposed to
 * output by giving it an array of normalized numbers (-1 to 1) that describes
 * the decision or output.  The meaning of the numbers is up to you; they
 * could represent normalized pixel values or air temperature.  If there is
 * any statistical correlation between what you input and what you want
 * output, the network will find it.
 *
 * Note: a Momentum member is declared above, but the original author listed
 * momentum (to avoid getting stuck in local optima) as future work, so it may
 * not yet be wired into the update rule -- verify against the implementation
 * file.
 *
 * Example:
 *
 *     Backpropagation_Neural_Network b;  // declare our backpropagation network
 *     int * Init_Info = new int[4];      // make an array for startup info
 *     Init_Info[0] = 3;                  // there are 3 layers in the network we want
 *     Init_Info[1] = 2;                  // input layer has 2 neurons
 *     Init_Info[2] = 3;                  // hidden layer has 3 neurons
 *     Init_Info[3] = 2;                  // output layer has 2 neurons
 *     b.Init(Init_Info, 0.01);           // init with the layout array and a
 *                                        // learning rate of 0.01
 *     bool Finished_Learning = false;    // flag
 *     double * In  = new double[2];      // input layer has 2 neurons
 *     double * Out = new double[2];      // output layer has 2 neurons
 *     while (!Finished_Learning)
 *     {
 *         // fill the arrays with training data
 *         Fill_Normalized_Input_And_Output_Vectors(In, Out);
 *         // forward propagate, backpropagate and apply weight adjustments
 *         b.Train_Once(In, Out);
 *     }
 * ***************************************************************************
 */
#endif // BACKPROPAGATION_NEURAL_NETWORK_H