Basic Neural Network Algorithm Source Code: BP, ART1, RBF, and Self-Organizing (Kohonen) Algorithms


// pittnet.CPP // Backpropagation / ART1 / Kohonen / Radial Basis

// The purpose of this prototype is to allow the user to construct and
// initialize a series of neural nets. Using the concept of inheritance and
// derived classes from C++ object-oriented programming, the necessity to
// declare multiple large structures that duplicate attributes is eliminated.
// Utilizing pointers and the "new" operator, dynamic arrays are established:
// the user can specify the storage array size for the number of hidden
// units and output units of the neural network while the program is running.
// This strategy eliminates the need to establish extremely large arrays
// while still maintaining the flexibility required to design nets of various
// shapes and sizes. The "Neural" classes allow the attributes of the newly
// constructed networks to be stored for further processing.
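//
// As an illustrative sketch (not code from the classes below), the
// allocation pattern used throughout the program is:
//
//   Hidden_units *layer = new Hidden_units[nodes]; // "nodes" chosen at run time
//   ...                                            // use the layer
//   delete [] layer;                               // released in the destructors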

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <iostream.h>
#include <string.h>
#include <conio.h>
#include <float.h>
#include <fstream.h>
#include <ctype.h>

#define IA 16807
#define IM 2147483647
#define AM (1.0 / IM)
#define IQ 127773
#define IR 2836
#define NTAB 32
#define NDIV (1+(IM-1) / NTAB)
#define EPS 1.2e-7
#define RNMX (1.0 - EPS)
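
// These constants are those of the Park-Miller "minimal standard" linear
// congruential generator (IA = 7^5, IM = 2^31 - 1) combined with a
// Bays-Durham shuffle table of NTAB entries, as in Numerical Recipes' ran1.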

// The following function is a random number generator
float bedlam(long *idum);
long gaset = -2500; // seed for bedlam; must be a long so its address can be passed

float bedlam(long *idum)
{
int xj;
long xk;
static long iy=0;
static long iv[NTAB];
float temp;

if(*idum <= 0 || !iy)
{
if(-(*idum) < 1)
{
*idum = 1 + *idum;
}
else
{
*idum = -(*idum);
}
for(xj = NTAB+7; xj >= 0; xj--)
{
xk = (*idum) / IQ;
*idum = IA * (*idum - xk * IQ) - IR * xk;
if(*idum < 0)
{
*idum += IM;
}
if(xj < NTAB)
{
iv[xj] = *idum;
}
}
iy = iv[0];
}

xk = (*idum) / IQ;
*idum = IA * (*idum - xk * IQ) - IR * xk;
if(*idum < 0)
{
*idum += IM;
}
xj = iy / NDIV;
iy = iv[xj];
iv[xj] = *idum;

if((temp=AM*iy) > RNMX)
{
return(RNMX);
}
else
{
return(temp);
}
} // end of bedlam function
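
// Example usage (a sketch): seed with a negative long to (re)initialize the
// shuffle table, then call repeatedly for uniform deviates in (0, 1):
//
//   long seed = -2500;        // any negative value re-seeds the generator
//   float u = bedlam(&seed);  // returns a uniform float in (0, 1)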

//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

// (Fully connected network using backpropagation)

// In this base class, all nodes in the network have the following attributes

class Processing_units
{
public:
float *processing_unit_input;
int number_of_input_units;
void establish_array_of_processing_unit_inputs(void);
float *weight_of_inputs;
void establish_weight_vector_for_processing_units(void);
float bias;
float output_signal;
void calculate_output_signal(int activation_function);
float calculate_output_signal_derivative(int afun);
float error_information_term;
void calculate_weight_and_bias_correction_terms(float learning_rate);
float *weight_correction_term;
float bias_correction_term;
float sum_of_weighted_inputs;
void update_weights_and_biases(void);
Processing_units();
~Processing_units();
};

Processing_units::Processing_units()
{
bias = 0.0;
output_signal = 0.0;
error_information_term = 0.0;
bias_correction_term = 0.0;
sum_of_weighted_inputs = 0.0;
}

Processing_units::~Processing_units()
{
delete [] processing_unit_input;
delete [] weight_of_inputs;
delete [] weight_correction_term;
}

// Define base class member functions

void Processing_units::establish_array_of_processing_unit_inputs(void)
{
processing_unit_input = new float[number_of_input_units];
weight_of_inputs = new float[number_of_input_units];
weight_correction_term = new float[number_of_input_units];
}

void Processing_units::establish_weight_vector_for_processing_units(void)
{
for(int i = 0; i < number_of_input_units; i++)
{
// weights range from -1 to 1
weight_of_inputs[i] = 1.0 - (2.0 * bedlam(&gaset));
}
}

void Processing_units::calculate_output_signal(int activation_function)
{
sum_of_weighted_inputs = 0.0;
for(int i = 0; i < number_of_input_units; i++)
{
if(i == number_of_input_units - 1)
{sum_of_weighted_inputs += (processing_unit_input[i] * weight_of_inputs[i]) + bias;}
else
{sum_of_weighted_inputs += processing_unit_input[i] * weight_of_inputs[i];}
}

switch(activation_function)
{
case 1: // binary sigmoid function
output_signal = 1.0 / (1.0 + exp(-1.0 * sum_of_weighted_inputs));
break;

case 2: // bipolar sigmoid function
output_signal = (2.0 / (1.0 + exp(-1.0 * sum_of_weighted_inputs))) - 1;
break;
}

}

float Processing_units::calculate_output_signal_derivative(int afun)
{
float derivative = 0.0;
switch(afun) // derivative used depends on the activation function selected
{
case 1: // binary sigmoid function
derivative = output_signal * (1.0 - output_signal);
break;

case 2: // bipolar sigmoid function
derivative = 0.5 * (1.0 + output_signal) * (1.0 - output_signal);
break;
}
return derivative;
}
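
// Both derivatives are expressed in terms of the output signal itself:
// binary sigmoid  f(x) = 1 / (1 + e^-x)      gives f' = f * (1 - f)
// bipolar sigmoid f(x) = 2 / (1 + e^-x) - 1  gives f' = 0.5 * (1 + f) * (1 - f)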

void Processing_units::calculate_weight_and_bias_correction_terms(float learning_rate)
{
for(int i = 0; i < number_of_input_units; i++)
{weight_correction_term[i] = learning_rate * error_information_term * processing_unit_input[i];}
bias_correction_term = learning_rate * error_information_term;
error_information_term = 0.0;
update_weights_and_biases();
}

void Processing_units::update_weights_and_biases(void)
{
for(int i = 0; i < number_of_input_units; i++)
{weight_of_inputs[i] = weight_of_inputs[i] + weight_correction_term[i];}
bias = bias + bias_correction_term;
}
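
// Together, the two functions above implement the delta rule:
// delta_w_i = alpha * delta * x_i and delta_bias = alpha * delta, where
// alpha is the learning rate and delta is the error information term.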

// Declare a derived class "Hidden_units" for hidden layer of network
class Hidden_units : public Processing_units
{
public:
void calculate_hidden_error_information_term(int afun);
};

// Define member functions for derived class "Hidden_units"
void Hidden_units::calculate_hidden_error_information_term(int afun)
{
float output_signal_derivative = calculate_output_signal_derivative(afun);
error_information_term = error_information_term * output_signal_derivative;
}

// Declare a derived class "Output_units" for output layer of network
class Output_units : public Processing_units
{
public:
void calculate_output_error_information_term(float target_value, int af);
float absolute_error_difference;
float error_difference_squared;
};


// Define member functions for derived class "Output_units"
void Output_units::calculate_output_error_information_term(float target_value, int af)
{
float output_signal_derivative = calculate_output_signal_derivative(af);
absolute_error_difference = fabs(target_value - output_signal);
error_information_term = (target_value - output_signal) * output_signal_derivative;
error_difference_squared = pow((target_value - output_signal), 2.0);
}
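
// For an output node the error information term is the usual
// delta_k = (t_k - y_k) * f'(y_k), with target t_k and output y_k; the
// absolute and squared differences are retained for error reporting.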

// Create classes to contain neural net specifications
class Hidden_layer
{
public:
Hidden_units *node_in_hidden_layer;
int nodes_in_hidden_layer;
~Hidden_layer();
};

Hidden_layer::~Hidden_layer()
{delete [] node_in_hidden_layer;}

// The following class represents an artificial neural network containing
// the topology, weights, training performance and testing performance
class Back_Topology
{
public:
Hidden_layer *hidden_layer_number;
Output_units *node_in_output_layer;
int number_of_hidden_layers;
int activation_function_for_hidden_layer;
int nodes_in_output_layer;
int activation_function_for_output_layer;
int signal_dimensions;
int number_of_tests;
void establish_activation_functions(void);
void construct_and_initialize_backprop_network(void);
void upload_network(void);
void savenet(void);
~Back_Topology();
};


void Back_Topology::construct_and_initialize_backprop_network(void)
{
int nodes, inputs_to_output_node;
char netcreate;
int looploc = 0;

do
{
cout <<"/n";
cout << "Do you wish to" << "/n/n";
cout << "C. Create your own Backprop Network " << "/n";
cout << "U. Upload an existing Backprop Network " << "/n/n";
cout << "Your choice?: "; cin >> netcreate;
netcreate = toupper(netcreate);
cout << "/n";
if((netcreate == 'C') || (netcreate == 'U')) {looploc = 1;}
} while(looploc <= 0);

if(netcreate == 'U')
{upload_network();}
else
{
cout << "Please enter the dimensions of the input vector: ";
cin >> signal_dimensions;
cout << "/n/n";
do
{
cout << "please enter the number of hidden layers (0 - 2): ";
cin >> number_of_hidden_layers;
cout << "/n/n";
} while(number_of_hidden_layers > 2);

if(number_of_hidden_layers > 0)
{
hidden_layer_number = new Hidden_layer[number_of_hidden_layers];
for(int layer = 0; layer < number_of_hidden_layers; layer++)
{
cout << "please enter the number of nodes in hidden layer " << layer + 1 << ": ";
cin >> hidden_layer_number[layer].nodes_in_hidden_layer;
cout << "/n/n";
}
}
cout << "/n";
cout << "please enter the number of nodes in the output layer: ";
cin >> nodes_in_output_layer;
cout << "/n/n";

// establish dynamic arrays for the number of nodes in the hidden and output layers

if(number_of_hidden_layers > 0)
{
for(int layer = 0; layer < number_of_hidden_layers; layer++)
{
nodes = hidden_layer_number[layer].nodes_in_hidden_layer;
hidden_layer_number[layer].node_in_hidden_layer = new Hidden_units[nodes];
}
}
node_in_output_layer = new Output_units[nodes_in_output_layer];

if(number_of_hidden_layers > 0)
{
// establish input connection between signal and hidden layer
for(nodes = 0; nodes < hidden_layer_number[0].nodes_in_hidden_layer; nodes++)
{
hidden_layer_number[0].node_in_hidden_layer[nodes].number_of_input_units = signal_dimensions;
hidden_layer_number[0].node_in_hidden_layer[nodes].establish_array_of_processing_unit_inputs();
hidden_layer_number[0].node_in_hidden_layer[nodes].establish_weight_vector_for_processing_units();
hidden_layer_number[0].node_in_hidden_layer[nodes].bias = 1.0 - (2.0 * bedlam(&gaset));
}
if(number_of_hidden_layers > 1)
{
// establish connection between first and second hidden layers
for(nodes = 0; nodes < hidden_layer_number[1].nodes_in_hidden_layer; nodes++)
{
hidden_layer_number[1].node_in_hidden_layer[nodes].number_of_input_units = hidden_layer_number[0].nodes_in_hidden_layer;
hidden_layer_number[1].node_in_hidden_layer[nodes].establish_array_of_processing_unit_inputs();
hidden_layer_number[1].node_in_hidden_layer[nodes].establish_weight_vector_for_processing_units();
hidden_layer_number[1].node_in_hidden_layer[nodes].bias = 1.0 - (2.0 * bedlam(&gaset));
}
}
}

// determine number of inputs to the output layer
if(number_of_hidden_layers > 0)
{inputs_to_output_node = hidden_layer_number[number_of_hidden_layers - 1].nodes_in_hidden_layer;}
else
{inputs_to_output_node = signal_dimensions;}

// establish input connections to output layer
for(nodes = 0; nodes < nodes_in_output_layer; nodes++)
{
node_in_output_layer[nodes].number_of_input_units = inputs_to_output_node;
node_in_output_layer[nodes].establish_array_of_processing_unit_inputs();
node_in_output_layer[nodes].establish_weight_vector_for_processing_units();
node_in_output_layer[nodes].bias = 1.0 - (2.0 * bedlam(&gaset));
}
establish_activation_functions(); // for hidden and output nodes
}
} // end construct and initialize neural network function

void Back_Topology::upload_network(void)
{
char getname[13];
ifstream get_ptr;
int netid, nodes, dim, inputs_to_output_node, hid, inputs;
int dolock = 0;

do
{
cout << "/n/n";
cout << "Please enter the name of the file which holds the Backpropagation network" << "/n";
cin >> getname; cout << "/n";
get_ptr.open(getname, ios::in);
get_ptr >> netid;
if(netid == 1) {dolock = 1;}
else
{
cout << "Error** file contents do not match Backprop specifications" << "/n";
cout << "try again" << "/n";
get_ptr.close();
}
} while(dolock <= 0);

get_ptr >> signal_dimensions;
get_ptr >> activation_function_for_output_layer;
get_ptr >> nodes_in_output_layer;
get_ptr >> inputs_to_output_node;

// establish output layer
node_in_output_layer = new Output_units[nodes_in_output_layer];
for(nodes = 0; nodes < nodes_in_output_layer; nodes++)
{
node_in_output_layer[nodes].number_of_input_units = inputs_to_output_node;
node_in_output_layer[nodes].establish_array_of_processing_unit_inputs();
node_in_output_layer[nodes].establish_weight_vector_for_processing_units();
get_ptr >> node_in_output_layer[nodes].bias;
}
for(nodes = 0; nodes < nodes_in_output_layer; nodes++)
{
for(dim = 0; dim < inputs_to_output_node; dim++)
{get_ptr >> node_in_output_layer[nodes].weight_of_inputs[dim];}
}

// establish hidden layer(s)
get_ptr >> number_of_hidden_layers;
if(number_of_hidden_layers > 0)
{
hidden_layer_number = new Hidden_layer[number_of_hidden_layers];
get_ptr >> activation_function_for_hidden_layer;
for(hid = 0; hid < number_of_hidden_layers; hid++)
{
get_ptr >> hidden_layer_number[hid].nodes_in_hidden_layer;
nodes = hidden_layer_number[hid].nodes_in_hidden_layer;
hidden_layer_number[hid].node_in_hidden_layer = new Hidden_units[nodes];

if(hid == 0) {inputs = signal_dimensions;}
else
{inputs = hidden_layer_number[0].nodes_in_hidden_layer;}

for(nodes = 0; nodes < hidden_layer_number[hid].nodes_in_hidden_layer; nodes++)
{
hidden_layer_number[hid].node_in_hidden_layer[nodes].number_of_input_units = inputs;
hidden_layer_number[hid].node_in_hidden_layer[nodes].establish_array_of_processing_unit_inputs();
get_ptr >> hidden_layer_number[hid].node_in_hidden_layer[nodes].bias;
}
for(nodes = 0; nodes < hidden_layer_number[hid].nodes_in_hidden_layer; nodes++)
{
for(dim = 0; dim < inputs; dim++)
{get_ptr >> hidden_layer_number[hid].node_in_hidden_layer[nodes].weight_of_inputs[dim];}
}
}
}
get_ptr.close();
}

void Back_Topology::savenet(void)
{
char savename[13];
ofstream save_ptr;
int nodes, dim, inputs, hid;

cout << "/n/n";
cout << "Please enter the name of the file that will hold" << "/n";
cout << "the Backpropagation network: "; cin >> savename;

save_ptr.open(savename, ios::out);
save_ptr << 1 << "\n"; // network identifier number
save_ptr << signal_dimensions << "\n";
save_ptr << activation_function_for_output_layer << "\n";
save_ptr << nodes_in_output_layer << "\n";

if(number_of_hidden_layers > 0)
{inputs = hidden_layer_number[number_of_hidden_layers - 1].nodes_in_hidden_layer;}
else
{inputs = signal_dimensions;}
save_ptr << inputs << "\n";
for(nodes = 0; nodes < nodes_in_output_layer; nodes++)
{save_ptr << node_in_output_layer[nodes].bias << " ";}
save_ptr << "\n";


for(nodes = 0; nodes < nodes_in_output_layer; nodes++)
{
for(dim = 0; dim < inputs; dim++)
{save_ptr << node_in_output_layer[nodes].weight_of_inputs[dim] << " ";}
save_ptr << "/n";
}

save_ptr << number_of_hidden_layers << "\n";

if(number_of_hidden_layers > 0)
{
save_ptr << activation_function_for_hidden_layer << "\n";

for(hid = 0; hid < number_of_hidden_layers; hid++)
{
save_ptr << hidden_layer_number[hid].nodes_in_hidden_layer << "\n";
if(hid == 0) {inputs = signal_dimensions;}
else {inputs = hidden_layer_number[0].nodes_in_hidden_layer;}

for(nodes = 0; nodes < hidden_layer_number[hid].nodes_in_hidden_layer; nodes++)
{save_ptr << hidden_layer_number[hid].node_in_hidden_layer[nodes].bias << " ";}
save_ptr << "/n";

for(nodes = 0; nodes < hidden_layer_number[hid].nodes_in_hidden_layer; nodes++)
{
for(dim = 0; dim < inputs; dim++)
{save_ptr << hidden_layer_number[hid].node_in_hidden_layer[nodes].weight_of_inputs[dim] << " ";}
save_ptr << "/n";
}
}
}
save_ptr.close();
}

Back_Topology::~Back_Topology()
{
delete [] hidden_layer_number;
delete [] node_in_output_layer;
}

void Back_Topology::establish_activation_functions(void)
{
int bchoice, count;
int dolock = 1;

for(count = 0; count < 2; count++)
{
cout << "/n";
if((count == 0) && (number_of_hidden_layers > 0))
{cout << "For the nodes in the hidden layer(s):" << "/n";}
else
{cout << "For the output layer:" << "/n";}
do
{
cout << "please select the type of activation function you wish the nodes to use" << "/n/n";
cout << "1. Binary Sigmoid Function " << "/n";
cout << "2. Bipolar Sigmoid Function " << "/n/n";
cout << "Your Selection "; cin >> bchoice;
cout << "/n/n";
if((bchoice == 1) || (bchoice == 2)) {dolock = 0;}
} while(dolock >= 1);

if((count == 0) && (number_of_hidden_layers > 0))
{activation_function_for_hidden_layer = bchoice;}
else
{activation_function_for_output_layer = bchoice;}
}
}

// Declare classes that will establish training and testing data arrays
class sample_data
{
public:
float *data_in_sample; // pointer to the dimensions of a single signal
~sample_data();
};

sample_data:: ~sample_data()
{delete [] data_in_sample;}

class Data_type
{
public:
char filename[13]; // File containing data for network training or testing
char resultsname[13]; // File containing data for results of training or testing
int signal_dimensions; // Number of dimensions contained in signal
int sample_number; // Number of signals in training set
int nodes_in_output_layer; // Dimensions of test data output
sample_data *number_of_samples; // Pointer to the array containing signals
float *max_output_value;
float *min_output_value;
virtual void determine_sample_number(void);
void specify_signal_sample_size(void);
virtual void load_data_into_array(void); // Function to place data into the array
void acquire_net_info(int signal, int no_output_nodes);
void delete_signal_array(void); // Function to free memory allocated to hold signals
virtual void normalize_data_in_array(void);
~Data_type(); // class destructor
};

Data_type::~Data_type()
{
delete [] max_output_value;
delete [] min_output_value;
}

// define functions of Data_type Class
void Data_type :: determine_sample_number(void)
{
ifstream dfile_ptr; // pointer to a file
dfile_ptr.open(filename, ios::in);

float hold;
int lock = 1;
sample_number = 0;

do
{
if(dfile_ptr.eof()){lock = 0;}
else
{dfile_ptr >> hold; sample_number += 1;}
}while(lock > 0);

dfile_ptr.close();
sample_number = int(sample_number / (signal_dimensions + nodes_in_output_layer));
}

void Data_type::specify_signal_sample_size(void)
{
char tchoice;
int dolock = 1;
do
{
cout <<"/n";
cout << "Please select the number of samples you wish to use" << "/n/n";
cout << " A. All samples in the file" << "/n";
cout << " S. Specific number of samples"<< "/n/n";
cout << " Your Selection: "; cin >> tchoice;
cout << "/n/n";
tchoice = toupper(tchoice);
if((tchoice == 'A') || (tchoice == 'S')) {dolock = 0;}
} while(dolock >= 1);
cout <<"/n";
if(tchoice == 'A') {determine_sample_number();}
else
{
cout << "/n";
cout << "please enter the number of testing samples you wish to use: ";
cin >> sample_number;
cout << "/n";
}
load_data_into_array();
}

void Data_type::normalize_data_in_array(void)
{
int imax, imin, trigger;
float min, max, rmax, rmin;
int total_dimension = signal_dimensions + nodes_in_output_layer;
int i, j;
max_output_value = new float[nodes_in_output_layer];
min_output_value = new float[nodes_in_output_layer];

for(j = 0; j < total_dimension; j++)
{
trigger = 1;
// identify the minimum and maximum values for each dimension
for(i = 0; i < sample_number; i++)
{
if(i == 0)
{
max = number_of_samples[i].data_in_sample[j];
min = number_of_samples[i].data_in_sample[j];
if((j >= (total_dimension - nodes_in_output_layer)))
{
min_output_value[j - (total_dimension - nodes_in_output_layer)] = min;
max_output_value[j - (total_dimension - nodes_in_output_layer)] = max;
}
}
else
{
if(number_of_samples[i].data_in_sample[j] < min)
{
min = number_of_samples[i].data_in_sample[j];
if(j >= (total_dimension - nodes_in_output_layer))
{min_output_value[j - (total_dimension - nodes_in_output_layer)] = min;}
}

if(number_of_samples[i].data_in_sample[j] > max)
{
max = number_of_samples[i].data_in_sample[j];
if(j >= (total_dimension - nodes_in_output_layer))
{max_output_value[j - (total_dimension - nodes_in_output_layer)] = max;}
}
}
}

// use the min / max just found for this dimension; indexing the output
// arrays here would run out of bounds for the input dimensions
imax = int(max);
imin = int(min);
rmax = max;
rmin = min;

if((imax == 1) && (imin == 0) && (rmax <= 1.0) && (rmin <= 0.0))
{trigger = 0;}

if((imax == 1) && (imin == 1) && (rmax <= 1.0) && (rmin <= 1.0))
{trigger = 0;}

if((imax == 0) && (imin == 0) && (rmax <= 0.0) && (rmin <= 0.0))
{trigger = 0;}

// normalize the values in each dimension of the signal
if(trigger != 0)
{
for(i = 0; i < sample_number; i++)
{number_of_samples[i].data_in_sample[j] = (number_of_samples[i].data_in_sample[j] - min) / (max - min);}
}
}
}
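
// Each remaining dimension is min-max normalized, x' = (x - min) / (max - min),
// mapping its values into [0, 1]; the "trigger" tests above skip dimensions
// that are already binary (all 0s and/or 1s) and need no rescaling.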

void Data_type :: acquire_net_info(int signal, int no_output_nodes)
{
signal_dimensions = signal;
nodes_in_output_layer = no_output_nodes;
}

void Data_type :: load_data_into_array(void)
{
// open the file containing the data
ifstream file_ptr; // pointer to a file
int i;
file_ptr.open(filename, ios::in);

// create dynamic array to hold the specified number of samples
number_of_samples = new sample_data[sample_number];

for(i = 0; i < sample_number; i++)
// create a dynamic array to hold the dimensions of each signal
{number_of_samples[i].data_in_sample = new float[signal_dimensions + nodes_in_output_layer];}

int dimensions = signal_dimensions + nodes_in_output_layer;

//read in data from file and place in array
for(i = 0; i < sample_number; i++)
{
for(int j = 0; j < dimensions; j++)
{file_ptr >> number_of_samples[i].data_in_sample[j];}
}
file_ptr.close();
cout << "/n";
}

void Data_type::delete_signal_array(void)
{delete [] number_of_samples;}

class signal_data // Class for randomizing the input signals
{
public:
int signal_value;
float signal_rank;
};

class Training : public Data_type // Derived Class For Training Data
{
public:
void request_training_data(int net_no); // Function to request data for training
int number_of_epochs;
signal_data *signalpoint;
float rate_of_learning; // learning rate constant used by the net
char presentation_order; // determines fixed or random signal presentation
void scramble_data_in_array(void);
float minimum_average_squared_error;
void delete_signal_data_array(void);
~Training();
};

Training::~Training()
{
// max_output_value and min_output_value are released by the Data_type
// destructor; deleting them here as well would be a double delete
delete [] signalpoint;
}


void Training::request_training_data(int net_no)
{
cout << "Please enter the file name containing the training data for neural net no. "<< net_no << "/n";
cin >> filename;
specify_signal_sample_size();
signalpoint = new signal_data[sample_number];
for(int i = 0; i < sample_number; i++) {signalpoint[i].signal_value = i;}
normalize_data_in_array();
}

void Training::scramble_data_in_array(void)
{
int swap1, swap2, hold_sample;
float hold_rank;

// randomly assign rank to all signals
for(int sig = 0; sig < sample_number; sig ++)
{signalpoint[sig].signal_rank = bedlam(&gaset);}

// reorder signals according to rank
for(swap1 = 0; swap1 < sample_number - 1; swap1++)
{
for(swap2 = swap1 + 1; swap2 < sample_number; swap2++)
{
if(signalpoint[swap1].signal_rank > signalpoint[swap2].signal_rank)
{
hold_sample = signalpoint[swap2].signal_value;
hold_rank = signalpoint[swap2].signal_rank;
signalpoint[swap2].signal_value = signalpoint[swap1].signal_value;
signalpoint[swap2].signal_rank = signalpoint[swap1].signal_rank;
signalpoint[swap1].signal_value = hold_sample;
signalpoint[swap1].signal_rank = hold_rank;
}
}
}
}
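
// Note that this shuffle assigns each signal a random rank and then orders
// the signals by rank with a simple O(n^2) exchange sort, yielding a random
// presentation order for the next epoch.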

void Training::delete_signal_data_array(void)
{
delete [] signalpoint;
delete_signal_array();
}

class Testing : public Training // Derived Class For Testing Data
{
public:
void request_testing_data(int net_no, int test); // Function to request data for testing
float average_squared_error;
};

void Testing::request_testing_data(int net_no, int test)
{
cout << "Please enter the file name containing the testing data for neural net no. "<< net_no << "/n/n";
cin >> filename;
cout << "/n/n";
cout << "For test #" << test + 1 << ":";
cout << "/n/n";
specify_signal_sample_size();
normalize_data_in_array();
}


//************************************************************************//
class NeuralB // class containing neural net structure for backpropagation
{ // along with training and testing data
private:
Training Training_Data; // file name and dynamic array for training
Testing *Test_Data; // files containing data to test the network
void initialize_training_storage_array(int N);
void establish_test_battery_size(void);
void train_net_with_backpropagation(void);
void test_neural_network(int BNET);
public:
Back_Topology Net_Design; // specifications for backpropagating network
int number_of_tests;
void establish_backprop_network(void);
void network_training_testing(int TT);
~NeuralB();
};
//************************************************************************//

// These Neural class member functions transmit data from the topology
// to the data storage arrays.

NeuralB:: ~NeuralB()
{delete [] Test_Data;}

void NeuralB :: initialize_training_storage_array(int N)
{
Training_Data.acquire_net_info(Net_Design.signal_dimensions, Net_Design.nodes_in_output_layer);
Training_Data.request_training_data(N);
}

void NeuralB :: establish_test_battery_size(void)
{
clrscr();
cout << "Please enter the number of tests you wish to run on the BP neural net: ";
cin >> number_of_tests; cout << "\n";
if(number_of_tests > 0)
{
Test_Data = new Testing[number_of_tests];
for(int i = 0; i < number_of_tests; i++)
{Test_Data[i].acquire_net_info(Net_Design.signal_dimensions, Net_Design.nodes_in_output_layer);}
}
}



// define the establish_backprop_network function
void NeuralB::establish_backprop_network(void)
{
clrscr();
cout << " **** Feedforward network using backpropagation **** " << "/n/n/n";
Net_Design.construct_and_initialize_backprop_network();
} // end establish_backprop_network function

// set the activation functions of the nodes of the network

// define train_net_with_backpropagation function
void NeuralB::train_net_with_backpropagation(void)
{
char savefile;
float output_error, sum_of_error, real_error_difference, target_minimum_average_squared_error;
int sig, layers, sigdim, epoch, hidnode, hidnode2, outnode;
int loopexit = 0;
float *maxdifference;
float *meandifference;

ofstream savefile_ptr;

clrscr();
cout << "please enter the number of epochs you wish to use for training: ";
cin >> Training_Data.number_of_epochs; cout<< "/n";
cout << "please enter the learning rate constant for backpropagation (0-1): ";
cin >> Training_Data.rate_of_learning; cout << "/n";
cout << "please enter the minimum average squared error you wish to target" << "/n";
cin >> target_minimum_average_squared_error; cout << "/n";
do
{
cout << "do you wish to save the mean error, maximum error" << "/n";
cout << "and average squared error for each epoch to a file? (Y or N): "; cin >> savefile;
savefile = toupper(savefile);
if((savefile == 'Y') || (savefile == 'N')) {loopexit = 2;}
cout << "/n";
} while(loopexit <= 1);

if(savefile == 'Y')
{
cout << "please enter the name of the file which will hold the results of training:" << "/n";
cin >> Training_Data.resultsname; cout <<"/n";
savefile_ptr.open(Training_Data.resultsname, ios::out);
}

cout << "Do you want signal presentation in random or fixed order(R or F): ";
cin >> Training_Data.presentation_order; cout << "/n";
Training_Data.presentation_order = toupper(Training_Data.presentation_order); cout << "/n";

maxdifference = new float[Net_Design.nodes_in_output_layer];
meandifference = new float[Net_Design.nodes_in_output_layer];
// start the per-node error statistics at zero (new float[] does not zero them)
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{ maxdifference[outnode] = 0.0; meandifference[outnode] = 0.0;}

// initiate backpropagation for the appropriate number of epochs
epoch = 0;
do
{
sum_of_error = 0;

for(sig = 0; sig < Training_Data.sample_number; sig++)
{
output_error = 0;
for(sigdim = 0; sigdim < Training_Data.signal_dimensions; sigdim++)
{

if(Net_Design.number_of_hidden_layers == 0) // no hidden layers present
{
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{Net_Design.node_in_output_layer[outnode].processing_unit_input[sigdim] = Training_Data.number_of_samples[Training_Data.signalpoint[sig].signal_value].data_in_sample[sigdim];}
}
else // 1 or 2 hidden layers present
{
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[0].nodes_in_hidden_layer; hidnode++)
{Net_Design.hidden_layer_number[0].node_in_hidden_layer[hidnode].processing_unit_input[sigdim] = Training_Data.number_of_samples[Training_Data.signalpoint[sig].signal_value].data_in_sample[sigdim];}
}
}

if(Net_Design.number_of_hidden_layers == 2) // two layers are present
{
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[0].nodes_in_hidden_layer; hidnode++)
{
Net_Design.hidden_layer_number[0].node_in_hidden_layer[hidnode].calculate_output_signal(Net_Design.activation_function_for_hidden_layer);
for(hidnode2 = 0; hidnode2 < Net_Design.hidden_layer_number[1].nodes_in_hidden_layer; hidnode2++)
{Net_Design.hidden_layer_number[1].node_in_hidden_layer[hidnode2].processing_unit_input[hidnode] = Net_Design.hidden_layer_number[0].node_in_hidden_layer[hidnode].output_signal;}
}
}

if(Net_Design.number_of_hidden_layers > 0)
{
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[Net_Design.number_of_hidden_layers -1].nodes_in_hidden_layer; hidnode++)
{
Net_Design.hidden_layer_number[Net_Design.number_of_hidden_layers -1].node_in_hidden_layer[hidnode].calculate_output_signal(Net_Design.activation_function_for_hidden_layer);
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{Net_Design.node_in_output_layer[outnode].processing_unit_input[hidnode] = Net_Design.hidden_layer_number[Net_Design.number_of_hidden_layers -1].node_in_hidden_layer[hidnode].output_signal;}
}
}
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
Net_Design.node_in_output_layer[outnode].calculate_output_signal(Net_Design.activation_function_for_output_layer);
Net_Design.node_in_output_layer[outnode].calculate_output_error_information_term(Training_Data.number_of_samples[Training_Data.signalpoint[sig].signal_value].data_in_sample[Training_Data.signal_dimensions + outnode], Net_Design.activation_function_for_output_layer);
// calculate the instantaneous sum of squared errors (Haykin, 1994)
real_error_difference = (pow(Net_Design.node_in_output_layer[outnode].error_difference_squared, 0.5)) * (Training_Data.max_output_value[outnode] - Training_Data.min_output_value[outnode]);
output_error += 0.5 * pow(real_error_difference, 2.0);

// calculate maximum and mean absolute error difference for each node
real_error_difference = Net_Design.node_in_output_layer[outnode].absolute_error_difference * (Training_Data.max_output_value[outnode] - Training_Data.min_output_value[outnode]);
meandifference[outnode] += real_error_difference / float(Training_Data.sample_number);
if(sig == 0) {maxdifference[outnode] = real_error_difference;}
else
{
if(real_error_difference > maxdifference[outnode])
{maxdifference[outnode] = real_error_difference;}
}
}

// average squared error for each signal is saved
sum_of_error += output_error / float (Training_Data.sample_number);

// backpropagation of error will depend on the number of hidden layers
if(Net_Design.number_of_hidden_layers > 0)
{ // backpropagate from output node to adjacent hidden layer
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[Net_Design.number_of_hidden_layers - 1].nodes_in_hidden_layer; hidnode++)
{Net_Design.hidden_layer_number[Net_Design.number_of_hidden_layers - 1].node_in_hidden_layer[hidnode].error_information_term += Net_Design.node_in_output_layer[outnode].error_information_term * Net_Design.node_in_output_layer[outnode].weight_of_inputs[hidnode];}
}
// calculate error information term for each node in the hidden layer
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[Net_Design.number_of_hidden_layers - 1].nodes_in_hidden_layer; hidnode++)
{Net_Design.hidden_layer_number[Net_Design.number_of_hidden_layers - 1].node_in_hidden_layer[hidnode].calculate_hidden_error_information_term(Net_Design.activation_function_for_hidden_layer);}


if(Net_Design.number_of_hidden_layers > 1)
{ // backpropagate error from hidden layer 2 to hidden layer 1
for(hidnode2 = 0; hidnode2 < Net_Design.hidden_layer_number[1].nodes_in_hidden_layer; hidnode2++)
{
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[0].nodes_in_hidden_layer; hidnode++)
{Net_Design.hidden_layer_number[0].node_in_hidden_layer[hidnode].error_information_term += Net_Design.hidden_layer_number[1].node_in_hidden_layer[hidnode2].error_information_term * Net_Design.hidden_layer_number[1].node_in_hidden_layer[hidnode2].weight_of_inputs[hidnode];}
}
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[0].nodes_in_hidden_layer; hidnode++)
{Net_Design.hidden_layer_number[0].node_in_hidden_layer[hidnode].calculate_hidden_error_information_term(Net_Design.activation_function_for_hidden_layer);}
}
}

// update the networks output nodes
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{Net_Design.node_in_output_layer[outnode].calculate_weight_and_bias_correction_terms(Training_Data.rate_of_learning);}

// update the networks hidden nodes (if they exist)
if(Net_Design.number_of_hidden_layers > 0)
{
for(layers = 0; layers < Net_Design.number_of_hidden_layers; layers++)
{
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[layers].nodes_in_hidden_layer; hidnode++)
{Net_Design.hidden_layer_number[layers].node_in_hidden_layer[hidnode].calculate_weight_and_bias_correction_terms(Training_Data.rate_of_learning);}
}
}
} // end sig loop

// save error information (if required)
if(savefile == 'Y')
{
savefile_ptr << epoch + 1 << " ";
savefile_ptr << sum_of_error << " ";
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{savefile_ptr << maxdifference[outnode] << " " << meandifference[outnode] << " ";}
savefile_ptr << endl;
cout.width(6);
clrscr();
cout << "Epoch #"<< epoch + 1 <<" is completed " << endl;
}

if(epoch == 0)
{Training_Data.minimum_average_squared_error = sum_of_error;}
else
{
if(sum_of_error < Training_Data.minimum_average_squared_error)
{Training_Data.minimum_average_squared_error = sum_of_error;}
}

// scramble the order of signal presentation (if required)
if(Training_Data.presentation_order == 'R')
{Training_Data.scramble_data_in_array();}

for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{ maxdifference[outnode] = 0.0; meandifference[outnode] = 0.0;}

if(Training_Data.minimum_average_squared_error <= target_minimum_average_squared_error)
{break;}

epoch = epoch + 1;

} while(epoch < Training_Data.number_of_epochs);

savefile_ptr.close();

// delete arrays holding the training data
Training_Data.delete_signal_data_array();
delete [] maxdifference;
delete [] meandifference;
} // end of backpropagation function


// define the function that tests the neural network
void NeuralB::test_neural_network(int BNET)
{
float output_error, sum_of_error, real_output;
int sig, sigdim, hidnode, hidnode2, outnode;

int bnet = BNET;
for(int t = 0; t < number_of_tests; t++)
{
Test_Data[t].request_testing_data(bnet, t);

sum_of_error = 0;

cout << "please enter the name of the file wich will hold the results of test: "<< t+1 << "/n";
cin >> Test_Data[t].resultsname; cout <<"/n";
ofstream savefile_ptr(Test_Data[t].resultsname);

for(sig = 0; sig < Test_Data[t].sample_number; sig++)
{
output_error = 0;
savefile_ptr << sig + 1 << " ";

for(sigdim = 0; sigdim < Test_Data[t].signal_dimensions; sigdim++)
{

if(Net_Design.number_of_hidden_layers == 0) // no hidden layers present
{
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{Net_Design.node_in_output_layer[outnode].processing_unit_input[sigdim] = Test_Data[t].number_of_samples[sig].data_in_sample[sigdim];}
}
else // 1 or 2 hidden layers present
{
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[0].nodes_in_hidden_layer; hidnode++)
{Net_Design.hidden_layer_number[0].node_in_hidden_layer[hidnode].processing_unit_input[sigdim] = Test_Data[t].number_of_samples[sig].data_in_sample[sigdim];}
}
}

if(Net_Design.number_of_hidden_layers == 2) // two layers are present
{
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[0].nodes_in_hidden_layer; hidnode++)
{
Net_Design.hidden_layer_number[0].node_in_hidden_layer[hidnode].calculate_output_signal(Net_Design.activation_function_for_hidden_layer);
for(hidnode2 = 0; hidnode2 < Net_Design.hidden_layer_number[1].nodes_in_hidden_layer; hidnode2++)
{Net_Design.hidden_layer_number[1].node_in_hidden_layer[hidnode2].processing_unit_input[hidnode] = Net_Design.hidden_layer_number[0].node_in_hidden_layer[hidnode].output_signal;}
}
}

if(Net_Design.number_of_hidden_layers > 0)
{
for(hidnode = 0; hidnode < Net_Design.hidden_layer_number[Net_Design.number_of_hidden_layers -1].nodes_in_hidden_layer; hidnode++)
{
Net_Design.hidden_layer_number[Net_Design.number_of_hidden_layers -1].node_in_hidden_layer[hidnode].calculate_output_signal(Net_Design.activation_function_for_hidden_layer);
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{Net_Design.node_in_output_layer[outnode].processing_unit_input[hidnode] = Net_Design.hidden_layer_number[Net_Design.number_of_hidden_layers -1].node_in_hidden_layer[hidnode].output_signal;}
}
}
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
Net_Design.node_in_output_layer[outnode].calculate_output_signal(Net_Design.activation_function_for_output_layer);
Net_Design.node_in_output_layer[outnode].calculate_output_error_information_term(Test_Data[t].number_of_samples[sig].data_in_sample[Test_Data[t].signal_dimensions + outnode], Net_Design.activation_function_for_output_layer);
}

// convert normalized target output data and send to file
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
real_output = Test_Data[t].min_output_value[outnode] + (Test_Data[t].number_of_samples[sig].data_in_sample[outnode + Test_Data[t].signal_dimensions] * (Test_Data[t].max_output_value[outnode] - Test_Data[t].min_output_value[outnode]));
savefile_ptr << real_output << " ";
}

savefile_ptr << " ";

// convert normalized output data and send to file
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
real_output = Test_Data[t].min_output_value[outnode] + (Net_Design.node_in_output_layer[outnode].output_signal * (Test_Data[t].max_output_value[outnode] - Test_Data[t].min_output_value[outnode]));
savefile_ptr << real_output << " ";
}

// send absolute differences between each node and its output to a file
for(outnode = 0; outnode < Net_Design.nodes_in_output_layer; outnode++)
{
real_output = (pow(Net_Design.node_in_output_layer[outnode].error_difference_squared, 0.5)) * (Test_Data[t].max_output_value[outnode] - Test_Data[t].min_output_value[outnode]);
savefile_ptr << real_output << " ";
real_output = pow(real_output, 2.0);
output_error += 0.5 * real_output;
}
// sum square of error
savefile_ptr << output_error << "\n";
if(sig == Test_Data[t].sample_number - 1)
{savefile_ptr.close();}

sum_of_error += output_error;
}
Test_Data[t].average_squared_error = sum_of_error / Test_Data[t].sample_number;
Test_Data[t].delete_signal_array();
}
} // end test neural network function

void NeuralB::network_training_testing(int TT)
{
int tt = TT;
int menu_choice;

clrscr();
cout << "/n/n/n/n";
cout << "**************** Operations Menu ****************" << "/n/n";
cout << " Please select one of the following options:" <<"/n/n";
cout << " 1. Train Backprop network only " <<"/n/n";
cout << " 2. Test Backprop network only " <<"/n/n";
cout << " 3. Train and Test Backprop network" <<"/n/n";
cout << "*************************************************" << "/n/n";
cout << " Your choice?: "; cin >> menu_choice;
cout << "/n/n";
switch(menu_choice)
{
case 1:
initialize_training_storage_array(tt);
train_net_with_backpropagation();
break;

case 2:
establish_test_battery_size();
if(number_of_tests > 0)
{test_neural_network(tt);}
break;

case 3:
initialize_training_storage_array(tt);
train_net_with_backpropagation();
establish_test_battery_size();
if(number_of_tests > 0)
{test_neural_network(tt);}
break;

default:network_training_testing(tt);
}
}
// This concludes the backpropagation section of the program

//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

// (ART 1) Define base class for Interface and Cluster units of the
// Adaptive Resonance Theory Neural Network 1

class ART_units
{
public:
float *input_value;
float *output_value;
float *input_weight_vector;
int number_of_inputs;
int number_of_outputs;
float activation;
void establish_input_output_arrays(void);
virtual void establish_input_weight_vector_array(void);
virtual void initialize_inputs_and_weights(void);
~ART_units();
};

ART_units::~ART_units()
{
delete [] input_value;
delete [] output_value;
delete [] input_weight_vector;
}

void ART_units::establish_input_output_arrays(void)
{
input_value = new float[number_of_inputs];
output_value = new float[number_of_outputs];
}

void ART_units::establish_input_weight_vector_array(void)
{input_weight_vector = new float[number_of_inputs - 1];}

void ART_units::initialize_inputs_and_weights(void)
{
for(int w = 0; w < number_of_inputs - 1; w++)
{input_weight_vector[w] = 1.0;}

for(int c = 1; c < number_of_inputs; c++)
{input_value[c] = 0.0;}
activation = 0.0;
}

// establish Interface node attributes
class Interface_units: public ART_units
{
public:
void recompute_activation(int winning_cluster);
void calculate_output_value(int G1);
};

void Interface_units::recompute_activation(int winning_cluster)
{activation = input_value[0] * input_weight_vector[winning_cluster];}

void Interface_units::calculate_output_value(int G1)
{
float feedback_signal, node_output, two_thirds_rule;
feedback_signal = 0.0;
// calculate feedback signal through use of weighted sum
for(int f = 0; f < number_of_inputs-1; f++)
{feedback_signal+=input_weight_vector[f]*input_value[f+1];}

two_thirds_rule = feedback_signal + input_value[0] + float(G1);

// use Two Thirds Rule to determine node output
if(two_thirds_rule >= 2.0) {node_output = 1.0;} else {node_output = 0.0;}

// establish output vector to cluster units
for(int p = 0; p < number_of_outputs; p++)
{output_value[p] = node_output;}
}
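
// The three signal sources counted by the Two Thirds Rule are the bottom-up
// input from the input layer, the top-down feedback from the cluster layer,
// and the gain control signal G1; an interface unit fires only when at least
// two of the three are active.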

// establish Cluster node attributes
class Cluster_units: public ART_units
{
public:
int cluster_tag;
float net_input;
void establish_input_weight_vector_array(void);
void initialize_inputs_and_weights(void);
void calculate_net_input(void);
void establish_node_output(void);
Cluster_units(); // default constructor
};

Cluster_units::Cluster_units()
{cluster_tag = 0;}

void Cluster_units::establish_input_weight_vector_array(void)
{input_weight_vector = new float[number_of_inputs];}

void Cluster_units::initialize_inputs_and_weights(void)
{
for(int c = 0; c < number_of_inputs; c++)
{input_weight_vector[c] = 1.0 / (1.0 + number_of_inputs);}
}

void Cluster_units::calculate_net_input(void)
{
net_input = 0.0;
for(int n = 0; n < number_of_inputs; n++)
{net_input += input_value[n] * input_weight_vector[n];}
}
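
// A cluster unit's net input is the bottom-up weighted sum
// y_j = sum over i of ( x_i * b_ij ), with the bottom-up weights b_ij
// initialized above to 1 / (1 + n), n being the number of input dimensions.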

void Cluster_units::establish_node_output(void)
{
for(int oput = 0; oput < number_of_outputs - 1; oput++)
if(activation >= 0.0)
{output_value[oput] = activation;}
else
{output_value[oput] = 0.0;}
}

// establish Inputs unit attributes
class Input_units {public: float signal_value;};

// establish ART1 neural network attributes
class ART_Topology
{
public:
char netcreate;
int clustercount;
int dimensions_of_signal;
int number_of_cluster_units;
int reset_value;
int resetcount;
float vigilance_parameter;
float norm_of_activation_vector;
float norm_of_input_vector;
float weight_update_parameter;
int cluster_champ;
int clusterange;
Input_units *node_in_input_layer;
Interface_units *node_in_interface_layer;
Cluster_units *node_in_cluster_layer;
void establish_net_topology(void);
void upload_network(void);
void transmit_pattern_to_interface(void);
void transmit_pattern_to_cluster(void);
void broadcast_output_to_cluster_layer(void);
void cluster_nodes_compete_for_activation(int train_or_test);
void compute_norm_of_activation_vector(void);
void compute_norm_of_input_vector(void);
void recompute_activation_vector_of_interface_layer(void);
void update_the_network(void);
void set_cluster_activation_to_zero(void);
void savenet(void);
ART_Topology();
~ART_Topology(); // class destructor
};

ART_Topology::ART_Topology()
{
clustercount = 0;
clusterange = 0;
resetcount = 0;
}

ART_Topology::~ART_Topology()
{
delete [] node_in_input_layer;
delete [] node_in_interface_layer;
delete [] node_in_cluster_layer;
}

void ART_Topology::establish_net_topology(void)
{
weight_update_parameter = 2.0;
node_in_input_layer = new Input_units[dimensions_of_signal];
node_in_interface_layer = new Interface_units[dimensions_of_signal];
node_in_cluster_layer = new Cluster_units[number_of_cluster_units];

// Establish interface layer of ART1 network
for(int I = 0; I < dimensions_of_signal; I++)
{
node_in_interface_layer[I].number_of_inputs = number_of_cluster_units + 1;
node_in_interface_layer[I].number_of_outputs = number_of_cluster_units;
node_in_interface_layer[I].establish_input_output_arrays();
node_in_interface_layer[I].establish_input_weight_vector_array();
node_in_interface_layer[I].initialize_inputs_and_weights();
}

// Establish cluster layer of ART1 network
for(int C = 0; C < number_of_cluster_units; C++)
{
node_in_cluster_layer[C].number_of_inputs = dimensions_of_signal;
node_in_cluster_layer[C].number_of_outputs = dimensions_of_signal + 1;
node_in_cluster_layer[C].establish_input_output_arrays();
node_in_cluster_layer[C].establish_input_weight_vector_array();
node_in_cluster_layer[C].initialize_inputs_and_weights();
}

}

void ART_Topology::upload_network(void)
{
char getname[13];
ifstream get_ptr;
int netid, node, dim;
int dolock = 0;

do
{
cout << "/n/n";
cout << "Please enter the name of the file which holds the ART1 Network" << "/n";
cin >> getname; cout << "/n";
get_ptr.open(getname, ios::in);
get_ptr >> netid;
if(netid == 2) {dolock = 1;}
else
{
cout << "Error** file contents do not match ART1 specifications" << "/n";
cout << "try again" << "/n";
get_ptr.close();
}
} while(dolock <= 0);

get_ptr >> dimensions_of_signal;
get_ptr >> weight_update_parameter;
get_ptr >> vigilance_parameter;
get_ptr >> clusterange;
get_ptr >> clustercount;
get_ptr >> number_of_cluster_units;

node_in_input_layer = new Input_units[dimensions_of_signal];
node_in_interface_layer = new Interface_units[dimensions_of_signal];
node_in_cluster_layer = new Cluster_units[number_of_cluster_units];

for(node = 0; node < dimensions_of_signal; node++)
{
node_in_interface_layer[node].number_of_inputs = number_of_cluster_units + 1;
node_in_interface_layer[node].number_of_outputs = number_of_cluster_units;
node_in_interface_layer[node].establish_input_output_arrays();
node_in_interface_layer[node].establish_input_weight_vector_array();
node_in_interface_layer[node].initialize_inputs_and_weights();
for(dim = 0; dim < number_of_cluster_units; dim++) // one top-down weight per cluster unit
{get_ptr >> node_in_interface_layer[node].input_weight_vector[dim];}
}

for(node = 0; node < number_of_cluster_units; node++)
{
node_in_cluster_layer[node].number_of_inputs = dimensions_of_signal;
node_in_cluster_layer[node].number_of_outputs = dimensions_of_signal + 1;
node_in_cluster_layer[node].establish_input_output_arrays();
node_in_cluster_layer[node].establish_input_weight_vector_array();
node_in_cluster_layer[node].initialize_inputs_and_weights();
get_ptr >> node_in_cluster_layer[node].cluster_tag;
for(dim = 0; dim < dimensions_of_signal; dim++)
{get_ptr >> node_in_cluster_layer[node].input_weight_vector[dim];}
}
get_ptr.close();
}

void ART_Topology::transmit_pattern_to_interface(void)
{
for(int d = 0; d < dimensions_of_signal; d++)
{
node_in_interface_layer[d].input_value[0] = node_in_input_layer[d].signal_value;
node_in_interface_layer[d].activation = node_in_input_layer[d].signal_value;
}
}

void ART_Topology::transmit_pattern_to_cluster(void)
{
int c;
for(int d = 0; d < dimensions_of_signal; d++)
{
for(c = 0; c < number_of_cluster_units; c++)
{node_in_cluster_layer[c].input_value[d] = node_in_input_layer[d].signal_value;}
}
}

void ART_Topology::broadcast_output_to_cluster_layer(void)
{
int Gain_one;
int cluster_active = 0;
int d, c;
for(c = 0; c < number_of_cluster_units; c++)
{if(node_in_cluster_layer[c].activation == 1.0) {cluster_active = 1;} }
compute_norm_of_input_vector();

if((cluster_active != 1) && (norm_of_input_vector > 0.0))
{Gain_one = 1;} else {Gain_one = 0;}

// establish interface output vector
for(d = 0; d < dimensions_of_signal; d++)
{node_in_interface_layer[d].calculate_output_value(Gain_one);}

//transmit interface output to units in cluster layer
for(d = 0; d < dimensions_of_signal; d++)
{
for(c = 0; c < number_of_cluster_units; c++)
{node_in_cluster_layer[c].input_value[d] = node_in_interface_layer[d].output_value[c];}
}
}

void ART_Topology::cluster_nodes_compete_for_activation(int train_or_test)
{
int d, cluster;
float champion = 0.0;

for(cluster = 0; cluster < clusterange + 1; cluster++)
{
if(node_in_cluster_layer[cluster].activation != -1.0)
{
node_in_cluster_layer[cluster].calculate_net_input();
if(node_in_cluster_layer[cluster].net_input > champion)
{
champion = node_in_cluster_layer[cluster].net_input;
cluster_champ = cluster;
}
}
}
if((node_in_cluster_layer[cluster_champ].cluster_tag == 0) && (train_or_test < 2))
{
node_in_cluster_layer[cluster_champ].cluster_tag = clustercount + 1;
clustercount = clustercount + 1;
}

if(train_or_test < 2)
{

for(cluster = 0; cluster < clusterange + 1; cluster++)
{
if(cluster == cluster_champ)
{node_in_cluster_layer[cluster].activation = 1.0;}
else
{
if(node_in_cluster_layer[cluster].activation != -1.0)
{node_in_cluster_layer[cluster].activation = 0.0;}
}
node_in_cluster_layer[cluster].establish_node_output();

// send output signals to Interface layer
for(d = 0; d < dimensions_of_signal; d++)
{node_in_interface_layer[d].input_value[cluster + 1] = node_in_cluster_layer[cluster].output_value[d];}
}
}
}

void ART_Topology::compute_norm_of_activation_vector(void)
{
norm_of_activation_vector = 0.0;
for(int d = 0; d < dimensions_of_signal; d++)
{norm_of_activation_vector += node_in_interface_layer[d].activation;}
compute_norm_of_input_vector();
}

void ART_Topology::compute_norm_of_input_vector(void)
{
norm_of_input_vector = 0.0;
for(int d = 0; d < dimensions_of_signal; d++)
{norm_of_input_vector += node_in_input_layer[d].signal_value;}
}

void ART_Topology::recompute_activation_vector_of_interface_layer(void)
{
for(int d = 0; d < dimensions_of_signal; d++)
{node_in_interface_layer[d].recompute_activation(cluster_champ);}
}
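
// The function below applies the ART1 vigilance test: the match ratio
// ||a|| / ||x|| (norm of the recomputed interface activations over the norm
// of the input vector) must reach the vigilance parameter, otherwise the
// winning cluster unit is inhibited (activation = -1.0) and the competition
// repeats. On acceptance, the winner's bottom-up weights are rescaled by
// L * a_i / (L - 1 + ||a||), L being the weight update parameter (2.0), and
// the top-down weights are multiplied by the activations (a logical AND for
// binary patterns).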

void ART_Topology:: update_the_network(void)
{
recompute_activation_vector_of_interface_layer();
compute_norm_of_activation_vector();
float ratio_test = norm_of_activation_vector / norm_of_input_vector;

if(ratio_test < vigilance_parameter)
{
node_in_cluster_layer[cluster_champ].activation = -1.0;
reset_value = 1;
resetcount += reset_value;
if(resetcount == number_of_cluster_units - 1)
{
clusterange = clusterange + 1;
if(clusterange > number_of_cluster_units)
{clusterange = number_of_cluster_units;}
}
}
else
{
// update the weights of the champion cluster unit
for(int u = 0; u < node_in_cluster_layer[cluster_champ].number_of_inputs; u++)
{node_in_cluster_layer[cluster_champ].input_weight_vector[u] = (weight_update_parameter * node_in_interface_layer[u].activation * node_in_cluster_layer[cluster_champ].input_weight_vector[u]) / ((weight_update_parameter - 1.0) + norm_of_activation_vector);}
for(int n = 0; n < dimensions_of_signal; n++)
{node_in_interface_layer[n].input_weight_vector[cluster_champ] = node_in_interface_layer[n].input_weight_vector[cluster_champ] * node_in_interface_layer[n].activation;}

reset_value = 0;
resetcount = 0;
}
}

void ART_Topology::set_cluster_activation_to_zero(void)
{
for(int cnode = 0; cnode < clusterange + 1; cnode++)
{node_in_cluster_layer[cnode].activation = 0.0;}
}

void ART_Topology::savenet(void)
{
char savename[13];
ofstream save_ptr;
int node, dim;

cout << "/n/n";
cout << "Please enter the name of the file which will hold the ART network"<<"/n";
cin >> savename; cout <<"/n";
save_ptr.open(savename, ios::out);

save_ptr << 2 << "\n"; //network identifier number
save_ptr << dimensions_of_signal << "\n";
save_ptr << weight_update_parameter << "\n";
save_ptr << vigilance_parameter << "\n";
save_ptr << clusterange << "\n";
save_ptr << clustercount << "\n";
save_ptr << number_of_cluster_units << "\n";

for(node = 0; node < dimensions_of_signal; node++)
{
for(dim = 0; dim < number_of_cluster_units; dim++) // one top-down weight per cluster unit
{save_ptr << node_in_interface_layer[node].input_weight_vector[dim] << " ";}
save_ptr << "\n";
}

for(node = 0; node < number_of_cluster_units; node++)
{
save_ptr << node_in_cluster_layer[node].cluster_tag << "\n";
for(dim = 0; dim < dimensions_of_signal; dim++)
{save_ptr << node_in_cluster_layer[node].input_weight_vector[dim] << " ";}
save_ptr << "\n";
}
save_ptr.close();
}

// Classes which specify the containers of ART training and test data
class ART_Training_Data : public Data_type
{
public:
void determine_sample_number(void);
void load_data_into_array(void);
virtual void request_ART_data(int net_no);
};


void ART_Training_Data::load_data_into_array(void)
{
int d, i;
float dimensions;

// open the file containing the data
ifstream Afile_ptr; // pointer to a file
Afile_ptr.open(filename, ios::in);
//create a dynamic array to hold the specified number of samples
number_of_samples = new sample_data[sample_number];

for(i = 0; i < sample_number; i++)
{number_of_samples[i].data_in_sample = new float[signal_dimensions];}

// read in data from file and place in array
for(i = 0; i < sample_number; i++)
{
for(d = 0; d < signal_dimensions; d++)
{
Afile_ptr >> dimensions;
number_of_samples[i].data_in_sample[d] = dimensions;
}
}
Afile_ptr.close();
}

void ART_Training_Data :: determine_sample_number(void)
{
ifstream dfile_ptr; // pointer to a file
dfile_ptr.open(filename, ios::in);

float hold;
int lock = 1;
sample_number = 0;

do
{
if(dfile_ptr.eof()){lock = 0;}
else
{dfile_ptr >> hold; sample_number += 1;}
}while(lock > 0);

dfile_ptr.close();
sample_number = int(sample_number / signal_dimensions);
}

void ART_Training_Data::request_ART_data(int net_no)
{
cout << "Please enter the file name containing the training data for ART network no. "<< net_no << "/n";
cin >> filename; cout << "/n";
specify_signal_sample_size();
}

class ART_Test_Data : public ART_Training_Data
{public: void request_ART_data(int net_no);};

void ART_Test_Data::request_ART_data(int net_no)
{
cout << "Please enter the file name containing the test data for ART network no. " << net_no << "/n";
cin >> filename; cout << "/n";
specify_signal_sample_size();
}

//************************************************************************//
class NeuralA // class containing the ART1 neural net structure
{ // along with training and testing data
private:
ART_Training_Data ART_Train;
ART_Test_Data * ART_Test; // the number of tests is variable
int number_of_ART_tests;
void initialize_ART_training_storage_array(int AN);
void establish_ART_test_battery_size(void);
void train_ART_network(int ARTN);
void test_ART_network(int ANET);
public:
ART_Topology ART_Design;
void construct_ART_network(void);
void network_training_testing(int TT);
~NeuralA();
};
//****************************************************************************//

NeuralA::~NeuralA()
{ delete [] ART_Test; }

void NeuralA::construct_ART_network(void)
{
int looploc = 0;
clrscr();
cout << " **** Adaptive Resonance Theory network for binary signals **** " <<"/n/n/n";
do
{
cout <<"/n";
cout << "Do you wish to" << "/n/n";
cout << "C. Create your own ART1 Network " << "/n";
cout << "U. Upload an existing ART1 Network " << "/n/n";
cout << "Your choice?: "; cin >> ART_Design.netcreate;
cout << "/n/n";
ART_Design.netcreate = toupper(ART_Design.netcreate);
if((ART_Design.netcreate == 'C') || (ART_Design.netcreate == 'U')) {looploc = 1;}
} while(looploc <= 0);
if(ART_Design.netcreate == 'U')
{ART_Design.upload_network();}
else
{
cout << "/n";
cout << "Please enter the dimensions of the ART network's input signal vector: ";
cin >> ART_Design.dimensions_of_signal; cout << "/n";
cout << "Please enter the vigilance parameter of the ART network: ";
cin >> ART_Design.vigilance_parameter; cout << "/n";
}
}

void NeuralA::initialize_ART_training_storage_array(int AN)
{
int AT = AN;
ART_Train.acquire_net_info(ART_Design.dimensions_of_signal, ART_Design.number_of_cluster_units);
ART_Train.request_ART_data(AT);
if(ART_Design.netcreate == 'C') // constructing new network
{
ART_Design.number_of_cluster_units = ART_Train.sample_number;
ART_Design.establish_net_topology();
}
}


void NeuralA::train_ART_network(int ARTN)
{
int dim, nodes_available_for_clustering;
char savetrain;
int dolock = 0;

clrscr();
cout << "/n/n";
cout << "For Neural Network #" << ARTN << "/n";
do
{
cout << "do you wish to save the ART Training results to a file? (Y or N): ";
cin >> savetrain;
savetrain = toupper(savetrain);
if((savetrain == 'N') || (savetrain == 'Y')) {dolock = 1;}
cout << "/n";
} while(dolock <= 0);

if(savetrain == 'Y')
{
cout << "please enter the name of the file to hold the results of the ART Training" << "/n";
cin >> ART_Train.resultsname; cout << "/n";
}

for(int pattern = 0; pattern < ART_Train.sample_number; pattern++)
{
// present pattern to input layer
for(dim = 0; dim < ART_Design.dimensions_of_signal; dim++)
{ART_Design.node_in_input_layer[dim].signal_value = ART_Train.number_of_samples[pattern].data_in_sample[dim];}

nodes_available_for_clustering = ART_Design.number_of_cluster_units;
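// Resonance loop: the winning cluster unit must pass the vigilance test.
// In ART1 the match ratio ||x AND w_J|| / ||x|| is compared against the
// vigilance parameter rho; a unit that fails is reset and the remaining
// units compete again, until one resonates or every unit has been tried.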

do
{
ART_Design.transmit_pattern_to_interface();
ART_Design.broadcast_output_to_cluster_layer();
ART_Design.cluster_nodes_compete_for_activation(1);
ART_Design.update_the_network();
nodes_available_for_clustering = nodes_available_for_clustering - ART_Design.reset_value;
if(nodes_available_for_clustering < 1) // input pattern cannot be clustered
{
// clrscr();
cout << "Input pattern #" << pattern + 1 << ": ";
for(dim = 0; dim < ART_Design.dimensions_of_signal; dim++)
{cout << int(ART_Design.node_in_input_layer[dim].signal_value);}
cout << " cannot be clustered" << "/n";
break;
}
} while (ART_Design.reset_value >=1);

if(savetrain == 'Y')
{
ofstream ART_savefile_ptr(ART_Train.resultsname, ios::out|ios::app);
ART_savefile_ptr << pattern + 1 << " ";
for(dim = 0; dim < ART_Design.dimensions_of_signal; dim++)
{ART_savefile_ptr << int(ART_Design.node_in_input_layer[dim].signal_value);}
ART_savefile_ptr << " " << ART_Design.node_in_cluster_layer[ART_Design.cluster_champ].cluster_tag << "/n";
ART_savefile_ptr.close();
}
ART_Design.set_cluster_activation_to_zero();
}
// delete array containing training data
ART_Train.delete_signal_array();
}

void NeuralA::establish_ART_test_battery_size(void)
{
cout <<"Please enter the number of tests you wish to run on the ART neural network: ";
cin >> number_of_ART_tests; cout << "\n";
// create testing array
if(number_of_ART_tests > 0)
{
ART_Test = new ART_Test_Data[number_of_ART_tests];
for(int t = 0; t < number_of_ART_tests; t++)
{ART_Test[t].acquire_net_info(ART_Design.dimensions_of_signal, ART_Design.number_of_cluster_units);}
}
}

void NeuralA::test_ART_network(int ANET)
{
int tnet, dim, pattern;

tnet = ANET;
for(int Atest = 0; Atest < number_of_ART_tests; Atest++)
{
ART_Test[Atest].request_ART_data(tnet);
cout << "For ART1 neural network #" << ANET <<" and test #"<<Atest+1<<":" <<"/n";
cout << "please enter the name of the file to hold the results of the ART Testing " << "/n";
cin >> ART_Test[Atest].resultsname; cout << "/n";
ofstream ART_savefile_ptr(ART_Test[Atest].resultsname);

for(pattern = 0; pattern < ART_Test[Atest].sample_number; pattern++)
{
for(dim = 0; dim < ART_Design.dimensions_of_signal; dim++)
{ART_Design.node_in_input_layer[dim].signal_value = ART_Test[Atest].number_of_samples[pattern].data_in_sample[dim];}

ART_Design.transmit_pattern_to_cluster();
ART_Design.cluster_nodes_compete_for_activation(2);

ART_savefile_ptr << pattern + 1 << " ";
for(dim = 0; dim < ART_Design.dimensions_of_signal; dim++)
{ART_savefile_ptr << int(ART_Design.node_in_input_layer[dim].signal_value);}

ART_savefile_ptr << " " << ART_Design.node_in_cluster_layer[ART_Design.cluster_champ].cluster_tag << "\n";
}

ART_savefile_ptr.close(); // end of test
ART_Test[Atest].delete_signal_array();
}

}

void NeuralA::network_training_testing(int TT)
{
int tt = TT;
int menu_choice;

clrscr();
cout << "/n/n/n/n";
cout << "**************** Operations Menu ****************" << "/n/n";
cout << " Please select one of the following options:" <<"/n/n";
cout << " 1. Train ART1 network only " <<"/n/n";
if(ART_Design.netcreate == 'U')
{
cout << " 2. Test ART1 network only " <<"/n/n";
cout << " 3. Train and Test ART1 network" <<"/n/n";
}
else
{
cout << " 2. Train and Test ART1 network" <<"/n/n";
}
cout << "*************************************************" << "/n/n";
cout << " Your choice?: "; cin >> menu_choice;
cout << "/n/n";
// a newly created network ('C') shows only two menu options, so a choice
// of 2 there means "Train and Test"
if((menu_choice == 2) && (ART_Design.netcreate == 'C')) {menu_choice = 3;}

switch(menu_choice)
{
case 1:
initialize_ART_training_storage_array(tt);
train_ART_network(tt);
break;

case 2:
establish_ART_test_battery_size();
if(number_of_ART_tests > 0)
{test_ART_network(tt);}
break;

case 3:
initialize_ART_training_storage_array(tt);
train_ART_network(tt);
establish_ART_test_battery_size();
if(number_of_ART_tests > 0)
{test_ART_network(tt);}
break;

default:network_training_testing(tt);
}

}

// This concludes the ART1 section of the program
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

// (Kohonen) Define base class for the Clustering Nodes of
// the Kohonen Self-Organizing Map

//************************ ATTENTION ************************************
// Note that the Class Kohonen_units will also contain variables and
// functions relevant to the Radial Basis Function Neural Network (RBFN)
//***********************************************************************
class Kohonen_units: public ART_units
{
public:
void establish_input_weight_vector_array(void);
void initialize_inputs_and_weights(void);
void calculate_sum_square_Euclidean_distance(void);
void update_the_weights(float learning_rate);
Kohonen_units(); // default constructor
//*******************************************************
float transfer_function_width; // RBFN
float Gaussian_transfer_output; // RBFN
void execute_Gaussian_transfer_function(void); // RBFN
//*******************************************************
};

Kohonen_units::Kohonen_units()
{number_of_outputs = 1;}

void Kohonen_units::establish_input_weight_vector_array(void)
{input_weight_vector = new float[number_of_inputs];}

void Kohonen_units::initialize_inputs_and_weights(void)
{
for(int k = 0; k < number_of_inputs; k++)
{input_weight_vector[k] = bedlam((long *) &gaset);} // pass the seed's address (assumes sizeof(int) == sizeof(long))
}
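// Each cluster unit measures how close its weight vector is to the current
// input using the Euclidean distance D_j = sqrt( sum_k (x_k - w_jk)^2 );
// the unit with the smallest D_j wins the subsequent competition.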

void Kohonen_units::calculate_sum_square_Euclidean_distance(void)
{
double sumsquare;
float ss1;
int ci;
output_value[0] = 0.0;
for(int k = 0; k < number_of_inputs; k++)
{
ci = k;

// squared difference for this dimension; when the input is zero this
// reduces to the weight squared, so one formula covers both cases
sumsquare = pow(input_weight_vector[ci] - input_value[ci], 2.0);
output_value[0] += sumsquare;
// cout << output_value[0] << "\n";
// cin >> output_value[0];
}
ss1 = output_value[0];

output_value[0] = sqrt(fabs(ss1));

}
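// Standard Kohonen update applied to the winning unit only:
// w_j(new) = w_j(old) + eta * (x - w_j(old)),
// which moves the winner's weight vector toward the presented input.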

void Kohonen_units::update_the_weights(float learning_rate)
{
for(int k = 0; k < number_of_inputs; k++)
{input_weight_vector[k] = input_weight_vector[k] + (learning_rate * (input_value[k] - input_weight_vector[k]));}
}
// RBFN //
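// Gaussian radial basis function: phi_j = exp( -(D_j / sigma_j)^2 ), where
// D_j is the Euclidean distance computed above and sigma_j is the unit's
// transfer_function_width.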
void Kohonen_units::execute_Gaussian_transfer_function(void)
{
float transfer_ratio = (-1.0) * pow((output_value[0] / transfer_function_width), 2.0);
Gaussian_transfer_output = exp(transfer_ratio);
}

// define class and member functions which define Kohonen Topology
class Kohonen_Topology
{
public:
int kluster_champ;
int dimensions_of_signal;
int maximum_number_of_clusters;
float max_learning_rate;
float min_learning_rate;
float interim_learning_rate;
Kohonen_units *node_in_cluster_layer;
void establish_Kohonen_topology(int netuse);
void kluster_nodes_compete_for_activation(void);
void update_the_Kohonen_network(int epoch_count, int max_epochs);
virtual void upload_network(void); // retrieve network from file
virtual void savenet(void); // save network to file
Kohonen_Topology(); // class constructor
~Kohonen_Topology(); // class destructor
};

Kohonen_Topology::Kohonen_Topology()
{interim_learning_rate = 1.0;}

Kohonen_Topology::~Kohonen_Topology()
{delete [] node_in_cluster_layer;}

void Kohonen_Topology::establish_Kohonen_topology(int netuse)
{
char netcreate;
int looploc = 0;

if(netuse == 1)
{
do
{
cout <<"/n";
cout << "Do you wish to" << "/n/n";
cout << "C. Create your own Kohonen Map " << "/n";
cout << "U. Upload an existing Kohonen Map " << "/n/n";
cout << "Your choice?: "; cin >> netcreate;
cout << "/n/n";
netcreate = toupper(netcreate);
if((netcreate == 'C') || (netcreate == 'U')) {looploc = 1;}
} while(looploc <= 0);
}
else
{
netcreate = 'C';
}

if((netcreate == 'U') && (netuse == 1))
{upload_network();}
else
{
if(netuse == 1)
{
cout <<"Please enter the dimensions of the network's input signal vector: ";
cin >> dimensions_of_signal; cout <<"/n";
}
cout << "please enter the maximum number of clusters to be formed: ";
cin >> maximum_number_of_clusters; cout << "/n";

// establish clustering layer of Kohonen network
node_in_cluster_layer = new Kohonen_units[maximum_number_of_clusters];
for(int c = 0; c < maximum_number_of_clusters; c++)
{
node_in_cluster_layer[c].number_of_inputs = dimensions_of_signal;
node_in_cluster_layer[c].establish_input_output_arrays();
node_in_cluster_layer[c].establish_input_weight_vector_array();
node_in_cluster_layer[c].initialize_inputs_and_weights();
}
}
}

void Kohonen_Topology::upload_network(void)
{
char getname[13];
ifstream get_ptr;
int netid, nodes, dim;
int dolock = 0;

do
{
cout << "/n/n";
cout << "Please enter the name of the file which holds the Kohonen Map" << "/n";
cin >> getname; cout << "/n";
get_ptr.open(getname, ios::in);
get_ptr >> netid;
if(netid == 3) {dolock = 1;}
else
{
cout << "Error** file contents do not match Kohonen specifications" << "/n";
cout << "try again" << "/n";
get_ptr.close();
}
} while(dolock <= 0);
get_ptr >> dimensions_of_signal;
get_ptr >> maximum_number_of_clusters;

node_in_cluster_layer = new Kohonen_units[maximum_number_of_clusters];
for(nodes = 0; nodes < maximum_number_of_clusters; nodes++)
{
node_in_cluster_layer[nodes].number_of_inputs = dimensions_of_signal;
node_in_cluster_layer[nodes].establish_input_output_arrays();
node_in_cluster_layer[nodes].establish_input_weight_vector_array();
}

for(nodes = 0; nodes < maximum_number_of_clusters; nodes++)
{
for(dim = 0; dim < dimensions_of_signal; dim++)
{get_ptr >> node_in_cluster_layer[nodes].input_weight_vector[dim];}
}
get_ptr.close();
}
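// Winner-take-all competition: kluster_champ is the index of the cluster
// unit whose weight vector lies closest (minimum Euclidean distance) to
// the current input signal.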

void Kohonen_Topology::kluster_nodes_compete_for_activation(void)
{
float minimum_distance;
for(int m = 0; m < maximum_number_of_clusters; m++)
{
node_in_cluster_layer[m].calculate_sum_square_Euclidean_distance();
if(m == 0)
{
kluster_champ = m;
minimum_distance = node_in_cluster_layer[m].output_value[0];
}
else
{
if(node_in_cluster_layer[m].output_value[0] < minimum_distance)
{
kluster_champ = m;
minimum_distance = node_in_cluster_layer[m].output_value[0];
}
}
}
}
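// The learning rate decays linearly from max_learning_rate to
// min_learning_rate over the training epochs:
// eta(ep) = eta_max - ((eta_max - eta_min) / (max_epochs - 1)) * ep,
// and is compounded into interim_learning_rate so the step size
// shrinks monotonically toward zero.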

void Kohonen_Topology::update_the_Kohonen_network(int epoch_count, int max_epochs)
{
int maxepoch;
if(max_epochs == 1) {maxepoch = 1;} else {maxepoch = max_epochs - 1;}
float adjusted_learning_rate = max_learning_rate - (((max_learning_rate - min_learning_rate) / maxepoch) * epoch_count);
interim_learning_rate = adjusted_learning_rate * interim_learning_rate;
node_in_cluster_layer[kluster_champ].update_the_weights(interim_learning_rate);
}

void Kohonen_Topology::savenet(void)
{
char savename[13];
ofstream save_ptr;
int node, dim;

cout << "/n/n";
cout << "Please enter the name of the file which will hold the Kohonen Map" <<"/n";
cin >> savename; cout <<"/n";
save_ptr.open(savename, ios::out);
save_ptr << 3 << "/n"; // network identifier number
save_ptr << dimensions_of_signal << "/n";
save_ptr << maximum_number_of_clusters << "/n";
for(node = 0; node < maximum_number_of_clusters; node++)
{
for(dim = 0; dim < dimensions_of_signal; dim++)
{save_ptr << node_in_cluster_layer[node].input_weight_vector[dim] << " ";}
save_ptr <<"/n";
}
save_ptr.close();
}

// define class and member functions which define training and test data
// storage for the Kohonen Self_Organizing Map

class Kohonen_Training_Data : public ART_Training_Data
{
public:
void acquire_net_info(int signal);
void normalize_data_in_array(void);
virtual void request_Kohonen_data(int net_no);
};

void Kohonen_Training_Data::acquire_net_info(int signal)
{signal_dimensions = signal;}
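// Min-max normalization applied independently to each dimension j:
// x' = (x - min_j) / (max_j - min_j), mapping the column onto [0,1].
// Columns that already hold binary (0/1) values are left untouched.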

void Kohonen_Training_Data::normalize_data_in_array(void)
{
int i, j, imax, imin;
int trigger;
float min, max;
max_output_value = new float[signal_dimensions];
min_output_value = new float[signal_dimensions];

for(j = 0; j < signal_dimensions; j++)
{
trigger = 1;
// identify minimum and maximum values for each dimension
for(i = 0; i < sample_number; i++)
{
if(i == 0)
{
max = number_of_samples[i].data_in_sample[j];
min = number_of_samples[i].data_in_sample[j];
}
else
{
if(number_of_samples[i].data_in_sample[j] < min)
{min = number_of_samples[i].data_in_sample[j];}

if(number_of_samples[i].data_in_sample[j] > max)
{max = number_of_samples[i].data_in_sample[j];}
}
}

// normalize the values in each dimension of the signal
max_output_value[j] = max;
min_output_value[j] = min;

imax = int(max);
imin = int(min);



if((imax == 1) && (imin == 0) && (max <= 1.0) && (min <= 0.0))
{trigger = 0;}

if((imax == 1) && (imin == 1) && (max <= 1.0) && (min <= 1.0))
{trigger = 0;}

if((imax == 0) && (imin == 0) && (max <= 0.0) && (min <= 0.0))
{trigger = 0;}


if(trigger != 0) // do not normalize binary signals
{
for(i = 0; i < sample_number; i++)
{number_of_samples[i].data_in_sample[j] = (number_of_samples[i].data_in_sample[j] - min)/(max - min);}
}
}
}

void Kohonen_Training_Data::request_Kohonen_data(int net_no)
{
cout << "Enter the file name containing the training data for Kohonen network no. " <<net_no << "/n";
cin >> filename; cout <<"/n";
specify_signal_sample_size();
normalize_data_in_array();
}

class Kohonen_Test_Data: public Kohonen_Training_Data
{public: void request_Kohonen_data(int net_no);};

void Kohonen_Test_Data::request_Kohonen_data(int net_no)
{
cout << "Please enter the file name containing the test data for Kohonen network no. " <<net_no << "/n";
cin >> filename; cout <<"/n";
specify_signal_sample_size();
normalize_data_in_array();
}

//************************************************************************//
class NeuralK // class containing the Kohonen neural net structure
{ // along with training and testing data
private:
Kohonen_Training_Data Kohonen_Train;
Kohonen_Test_Data *Kohonen_Test; // number of tests is variable
int number_of_Kohonen_tests;
void initialize_Kohonen_training_storage_array(int KN);
void establish_Kohonen_test_battery_size(void);
void train_Kohonen_network(int KOHN);
void test_Kohonen_network(int KNET);
public:
Kohonen_Topology Kohonen_Design;
void construct_Kohonen_network(void);
void network_training_testing(int TT);
~NeuralK();
};
//*************************************************************************//

NeuralK::~NeuralK()
{delete [] Kohonen_Test;}

void NeuralK::construct_Kohonen_network(void)
{
clrscr();
cout <<"**** Kohonen Self-Organizing Map ****"<< "/n/n/n";
Kohonen_Design.establish_Kohonen_topology(1);
}

void NeuralK::initialize_Kohonen_training_storage_array(int KN)
{
int KT = KN;
Kohonen_Train.acquire_net_info(Kohonen_Design.dimensions_of_signal);
Kohonen_Train.request_Kohonen_data(KT);
}

void NeuralK::establish_Kohonen_test_battery_size(void)
{
cout << "Please enter the number of tests you wish to run on the Kohonen Neural Network: ";
cin >> number_of_Kohonen_tests; cout << "/n";
if(number_of_Kohonen_tests > 0)
{
// create testing array
Kohonen_Test = new Kohonen_Test_Data[number_of_Kohonen_tests];
for(int t = 0; t < number_of_Kohonen_tests; t++)
{Kohonen_Test[t].acquire_net_info(Kohonen_Design.dimensions_of_signal);}
}
}

void NeuralK::train_Kohonen_network(int KOHN)
{
int dim, ep, k_epochs, pattern, knodes, dolock;
clrscr();
cout <<"/n/n";
cout << "For Neural Network #"<<KOHN<<"/n/n";
cout << "please enter the maximum learning rate parameter (0-1): ";
cin >> Kohonen_Design.max_learning_rate; cout <<"/n";
cout << "please enter the minimum learning rate parameter (0-1): ";
cin >> Kohonen_Design.min_learning_rate; cout <<"/n";
cout << "please enter the number of epochs used to train the Kohonen Map: ";
cin >> k_epochs; cout << "/n";
ep = 0;
dolock = 0;
do
{
for(pattern = 0; pattern < Kohonen_Train.sample_number; pattern++)
{
for(knodes = 0; knodes < Kohonen_Design.maximum_number_of_clusters; knodes++)
{
for(dim = 0; dim < Kohonen_Design.dimensions_of_signal; dim++)
{Kohonen_Design.node_in_cluster_layer[knodes].input_value[dim] = Kohonen_Train.number_of_samples[pattern].data_in_sample[dim];}
}
Kohonen_Design.kluster_nodes_compete_for_activation();
Kohonen_Design.update_the_Kohonen_network(ep, k_epochs);
}
cout << "Epoch " << ep + 1 << " is completed" <<"/n";
if((ep == k_epochs - 1) || (Kohonen_Design.interim_learning_rate == 0.0))
{dolock = 1;}
ep = ep + 1;
} while(dolock <= 0);

Kohonen_Train.delete_signal_array();
}

void NeuralK::test_Kohonen_network(int KNET)
{
int tnet, dim, pattern, knodes;
float realvalue;
tnet = KNET;
clrscr();
for(int ktest = 0; ktest < number_of_Kohonen_tests; ktest++)
{
Kohonen_Test[ktest].request_Kohonen_data(tnet);
cout <<"For Kohonen neural network #"<< KNET <<" and test #"<< ktest+1 <<":" <<"/n";
cout <<"please enter the name of the file to hold the test" << "/n";
cin >> Kohonen_Test[ktest].resultsname; cout <<"/n";
ofstream Kohonen_savefile_ptr(Kohonen_Test[ktest].resultsname);

for(pattern = 0; pattern < Kohonen_Test[ktest].sample_number; pattern++)
{
for(knodes = 0; knodes < Kohonen_Design.maximum_number_of_clusters; knodes++)
{
for(dim = 0; dim < Kohonen_Design.dimensions_of_signal; dim++)
{Kohonen_Design.node_in_cluster_layer[knodes].input_value[dim] = Kohonen_Test[ktest].number_of_samples[pattern].data_in_sample[dim];}
}
Kohonen_Design.kluster_nodes_compete_for_activation();

Kohonen_savefile_ptr << pattern + 1 << " ";
for(dim = 0; dim < Kohonen_Design.dimensions_of_signal; dim++)
{
realvalue = (Kohonen_Test[ktest].number_of_samples[pattern].data_in_sample[dim]*(Kohonen_Test[ktest].max_output_value[dim] - Kohonen_Test[ktest].min_output_value[dim])) + Kohonen_Test[ktest].min_output_value[dim];
Kohonen_savefile_ptr << realvalue << " ";
}
Kohonen_savefile_ptr << " " << Kohonen_Design.kluster_champ + 1 << "/n";
}
Kohonen_savefile_ptr.close();
Kohonen_Test[ktest].delete_signal_array();
} // end test loop
}

void NeuralK::network_training_testing(int TT)
{
int tt = TT;
int menu_choice;

clrscr();
cout << "/n/n/n/n";
cout << "**************** Operations Menu ****************" << "/n/n";
cout << " Please select one of the following options:" <<"/n/n";
cout << " 1. Train Kohonen network only " <<"/n/n";
cout << " 2. Test Kohonen network only " <<"/n/n";
cout << " 3. Train and Test Kohonen network" <<"/n/n";
cout << "*************************************************" << "/n/n";
cout << " Your choice?: "; cin >> menu_choice;
cout << "/n/n";
switch(menu_choice)
{
case 1:
initialize_Kohonen_training_storage_array(tt);
train_Kohonen_network(tt);
break;

case 2:
establish_Kohonen_test_battery_size();
if(number_of_Kohonen_tests > 0)
{test_Kohonen_network(tt);}
break;

case 3:
initialize_Kohonen_training_storage_array(tt);
train_Kohonen_network(tt);
establish_Kohonen_test_battery_size();
if(number_of_Kohonen_tests > 0)
{test_Kohonen_network(tt);}
break;

default:network_training_testing(tt);
}

}
// This concludes the Kohonen section of the program
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
// (Radial Basis Function Network)
// define class and member functions which define Radial Basis Topology

class Radial_Basis_Topology: public Kohonen_Topology
{
public:
int number_of_output_units;
Output_units *node_in_output_layer;
int activation_function;
void establish_Radial_Basis_topology(void);
void establish_activation_function(void);
void calculate_transfer_function_widths(void);
void transfer_Gaussian_to_Output_layer(void);
void upload_network(void); // retrieve network from file
void savenet(void);
~Radial_Basis_Topology();
};

Radial_Basis_Topology::~Radial_Basis_Topology()
{ delete [] node_in_output_layer;}

void Radial_Basis_Topology::establish_activation_function(void)
{
int dolock = 1;
int bchoice;
cout << "/n";
cout << "For the output layer:" << "/n";
do
{
cout << "please select the type of activation function you wish the nodes to use" << "/n/n";
cout << "1. Binary Sigmoid Function " << "/n";
cout << "2. Bipolar Sigmoid Function " << "/n/n";
cout << "Your Selection "; cin >> bchoice;
cout << "/n/n";
if((bchoice == 1) || (bchoice == 2)) {dolock = 0;}
} while(dolock >= 1);
activation_function = bchoice;
}

void Radial_Basis_Topology::establish_Radial_Basis_topology(void)
{
char netcreate;
int looploc = 0;

do
{
cout <<"/n";
cout << "Do you wish to" << "/n/n";
cout << "C. Create your own RBF Network " << "/n";
cout << "U. Upload an existing RBF Network " << "/n/n";
cout << "Your choice?: "; cin >> netcreate;
cout << "/n/n";
netcreate = toupper(netcreate);
if((netcreate == 'C') || (netcreate == 'U')) {looploc = 1;}
} while(looploc <= 0);

if(netcreate == 'U')
{upload_network();}
else
{
cout << "Please enter the dimensions of the RBF networks's input signal vector: ";
cin >> dimensions_of_signal; cout << "/n";
establish_Kohonen_topology(2); // establishes maximum number of clusters
cout << "/n";
cout << "please enter the number of nodes in the RBF output layer: ";
cin >> number_of_output_units;
cout << "/n/n";
node_in_output_layer = new Output_units[number_of_output_units];
for(int o = 0; o < number_of_output_units; o++)
{
node_in_output_layer[o].number_of_input_units = maximum_number_of_clusters;
node_in_output_layer[o].establish_array_of_processing_unit_inputs();
node_in_output_layer[o].establish_weight_vector_for_processing_units();
node_in_output_layer[o].bias = 1.0 - (2.0 * bedlam((long *) &gaset)); // random bias in (-1,1); seed passed by address
}
establish_activation_function();
}
}

void Radial_Basis_Topology::upload_network(void)
{
char getname[13];
ifstream get_ptr;
int netid, node, dim;
int dolock = 0;

do
{
cout << "/n/n";
cout << "Please enter the name of the file which holds the RBF network" << "/n";
cin >> getname; cout << "/n";
get_ptr.open(getname, ios::in);
get_ptr >> netid;
if(netid == 4) {dolock = 1;}
else
{
cout << "Error** file contents do not match RBF specifications" << "/n";
cout << "try again" << "/n";
get_ptr.close();
}
} while(dolock <= 0);
get_ptr >> dimensions_of_signal;
get_ptr >> number_of_output_units;
get_ptr >> activation_function;
get_ptr >> maximum_number_of_clusters;

node_in_output_layer = new Output_units[number_of_output_units];
for(node = 0; node < number_of_output_units; node++)
{
node_in_output_layer[node].number_of_input_units = maximum_number_of_clusters;
node_in_output_layer[node].establish_array_of_processing_unit_inputs();
node_in_output_layer[node].establish_weight_vector_for_processing_units();
get_ptr >> node_in_output_layer[node].bias;
}

for(node = 0; node < number_of_output_units; node++)
{
for(dim = 0; dim < maximum_number_of_clusters; dim++)
{get_ptr >> node_in_output_layer[node].weight_of_inputs[dim];}
}

node_in_cluster_layer = new Kohonen_units[maximum_number_of_clusters];
for(node = 0; node < maximum_number_of_clusters; node++)
{
node_in_cluster_layer[node].number_of_inputs = dimensions_of_signal;
node_in_cluster_layer[node].establish_input_output_arrays();
node_in_cluster_layer[node].establish_input_weight_vector_array();
get_ptr >> node_in_cluster_layer[node].transfer_function_width;
}

for(node = 0; node < maximum_number_of_clusters; node++)
{
for(dim = 0; dim < dimensions_of_signal; dim++) // one weight per input dimension, matching savenet()
{get_ptr >> node_in_cluster_layer[node].input_weight_vector[dim];}
}
get_ptr.close();
}
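// Width heuristic (assumes at least two cluster units): each sigma_i is the
// root-mean-square distance between center i and every other center,
// sigma_i = sqrt( (1/(P-1)) * sum_{j != i} ||w_i - w_j||^2 ),
// so widely spaced centers receive proportionally wider Gaussians.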

void Radial_Basis_Topology::calculate_transfer_function_widths(void)
{
float sum, w1, w2;
int i, j, k, ihold, jhold, khold;

for(i = 0; i < maximum_number_of_clusters; i++)
{node_in_cluster_layer[i].transfer_function_width = 0.0;}

for(i = 0; i < maximum_number_of_clusters - 1; i++)
{
for(j = i + 1; j < maximum_number_of_clusters; j++)
{
sum = 0.0;
for(k = 0; k < dimensions_of_signal; k++)
{
khold = k;
ihold = i;
jhold = j;
w1 = node_in_cluster_layer[ihold].input_weight_vector[khold];
w2 = node_in_cluster_layer[jhold].input_weight_vector[khold];
sum = pow((w1 - w2), 2.0);
node_in_cluster_layer[ihold].transfer_function_width += sum;
node_in_cluster_layer[jhold].transfer_function_width += sum;
}
}
}

for(i = 0; i < maximum_number_of_clusters; i++)
{
node_in_cluster_layer[i].transfer_function_width = (1.0 / (maximum_number_of_clusters - 1)) * node_in_cluster_layer[i].transfer_function_width;
node_in_cluster_layer[i].transfer_function_width = pow(node_in_cluster_layer[i].transfer_function_width, 0.5);
}
}
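// Forward pass of the RBF network: every cluster unit emits its Gaussian
// response phi_j, and each output unit forms the weighted sum
// y_i = f( sum_j v_ij * phi_j + b_i ), with f the sigmoid chosen in
// establish_activation_function().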

void Radial_Basis_Topology::transfer_Gaussian_to_Output_layer(void)
{
int i, j;

for(i = 0; i < maximum_number_of_clusters; i++)
{
node_in_cluster_layer[i].calculate_sum_square_Euclidean_distance();
node_in_cluster_layer[i].execute_Gaussian_transfer_function();
}

// transfer signal from cluster to output units and calculate output
for(i = 0; i < number_of_output_units; i++)
{
for(j = 0; j < maximum_number_of_clusters; j++)
{node_in_output_layer[i].processing_unit_input[j] = node_in_cluster_layer[j].Gaussian_transfer_output;}
node_in_output_layer[i].calculate_output_signal(activation_function);
}
}

void Radial_Basis_Topology::savenet(void)
{
char savename[13];
ofstream save_ptr;
int node, dim;

cout << "/n/n";
cout << "Please enter the name of the file which will hold the RBF network"<<"/n";
cin >> savename; cout <<"/n";
save_ptr.open(savename, ios::out);

save_ptr << 4 << "/n"; //network identifier number
save_ptr << dimensions_of_signal << "/n";
save_ptr << number_of_output_units << "/n";
save_ptr << activation_function << "/n";
save_ptr << maximum_number_of_clusters << "/n";

for(node = 0; node < number_of_output_units; node++)
{save_ptr << node_in_output_layer[node].bias << " ";}
save_ptr << "/n";

for(node = 0; node < number_of_output_units; node++)
{
for(dim = 0; dim < maximum_number_of_clusters; dim++)
{save_ptr << node_in_output_layer[node].weight_of_inputs[dim] << " ";}
save_ptr << "/n";
}

for(node = 0; node < maximum_number_of_clusters; node++)
{save_ptr << node_in_cluster_layer[node].transfer_function_width << " ";}
save_ptr << "/n";

for(node = 0; node < maximum_number_of_clusters; node++)
{
for(dim = 0; dim < dimensions_of_signal; dim++)
{save_ptr << node_in_cluster_layer[node].input_weight_vector[dim] << " ";}
save_ptr << "/n";
}
save_ptr.close();

}
//******************************************************************************
class NeuralR // class containing the Radial Basis neural net structure
{ // along with training and testing data
private:
Training RTrain; // file name and dynamic array for training
Testing *RTests; // files containing data to test network
int number_of_tests; // number of tests run on the neural net
void initialize_training_storage_array(int R);
void establish_test_battery_size(void);
void train_RBF_neural_network(int RBF);
void test_neural_network(int RBN);
public:
Radial_Basis_Topology RBF_Design; // specification of radial basis network
void establish_Radial_Basis_network(void);
void network_training_testing(int TT);
~NeuralR();
};
//******************************************************************************

NeuralR::~NeuralR()
{delete [] RTests;}

void NeuralR::initialize_training_storage_array(int R)
{
RTrain.acquire_net_info(RBF_Design.dimensions_of_signal, RBF_Design.number_of_output_units);
RTrain.request_training_data(R);
}

void NeuralR::establish_test_battery_size(void)
{
clrscr();
cout << "Please enter the number of tests you wish to run on the RBF network: ";
cin >> number_of_tests; cout << "/n";
RTests = new Testing[number_of_tests];
for(int i = 0; i < number_of_tests; i++)
{RTests[i].acquire_net_info(RBF_Design.dimensions_of_signal, RBF_Design.number_of_output_units);}
}

void NeuralR::establish_Radial_Basis_network(void)
{
clrscr();
cout << " **** Radial Basis Function Network **** " << "/n/n/n";
RBF_Design.establish_Radial_Basis_topology();
}

void NeuralR::train_RBF_neural_network(int RBF)
{
char savefile;
float output_error, sum_of_error, real_error_difference, target_minimum_average_squared_error;
int bepoch, outnode, sig, sigdim, cnode;
int dim, ep, k_epochs, pattern, knodes, dolock;
float *maxdifference;
float *meandifference;
int loopexit = 1;
ofstream savefile_ptr;

// establish the cluster-center weight vectors with Kohonen-style
// competitive learning (an online analogue of K-means clustering)
clrscr();
cout <<"/n/n";
cout << "For Neural Network #"<<RBF<<"/n/n";
cout << "please enter the maximum learning rate parameter (0-1): ";
cin >> RBF_Design.max_learning_rate; cout <<"/n";
cout << "please enter the minimum learning rate parameter (0-1): ";
cin >> RBF_Design.min_learning_rate; cout <<"/n";
cout << "please enter the number of epochs used to train the RBF clusters: ";
cin >> k_epochs; cout << "/n/n/n";
ep = 0;
dolock = 0;
do
{
for(pattern = 0; pattern < RTrain.sample_number; pattern++)
{
for(knodes = 0; knodes < RBF_Design.maximum_number_of_clusters; knodes++)
{
for(dim = 0; dim < RBF_Design.dimensions_of_signal; dim++)
{
RBF_Design.node_in_cluster_layer[knodes].input_value[dim] = RTrain.number_of_samples[pattern].data_in_sample[dim];
}

}
RBF_Design.kluster_nodes_compete_for_activation();
RBF_Design.update_the_Kohonen_network(ep, k_epochs);
}

if((ep == k_epochs - 1) || (RBF_Design.interim_learning_rate == 0.0))
{dolock = 1;}
ep = ep + 1;
} while(dolock <= 0);

RBF_Design.calculate_transfer_function_widths();

// use supervised learning for output layer weight vector
cout << "Cluster center vectors established" << "/n/n";
cout << "please enter the number of epochs you wish to use for training"<< "/n";
cout << "the output layer: "; cin >> RTrain.number_of_epochs; cout<< "/n/n";
cout << "please enter the learning rate constant for backpropagation (0-1): ";
cin >> RTrain.rate_of_learning; cout << "/n";
cout << "please enter the minimum average squared error you wish to target" << "/n";
cin >> target_minimum_average_squared_error; cout << "/n";
do
{
cout << "do you wish to save the mean error, maximum error" << "/n";
cout << "and average squared error for each epoch to a file? (Y or N): "; cin >> savefile;
savefile = toupper(savefile);
if((savefile == 'Y') || (savefile == 'N')) {loopexit = 2;}
cout << "/n";
} while(loopexit <= 1);

if(savefile == 'Y')
{
cout << "please enter the name of the file which will hold the results of training:" << "/n";
cin >> RTrain.resultsname; cout <<"/n";
savefile_ptr.open(RTrain.resultsname, ios::out);
}

maxdifference = new float[RBF_Design.number_of_output_units];
meandifference = new float[RBF_Design.number_of_output_units];
for(outnode = 0; outnode < RBF_Design.number_of_output_units; outnode++)
{maxdifference[outnode] = 0.0; meandifference[outnode] = 0.0;} // the accumulators must start at zero

// initiate backpropagation for the appropriate number of epochs
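// Only the output layer is trained here: with the usual delta rule each
// output weight moves by delta_v_ij = eta * d_i * phi_j, where d_i is the
// node's error information term derived from (t_i - y_i) and the sigmoid's
// derivative, and phi_j is the Gaussian output of cluster unit j.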
bepoch = 0;
do
{
sum_of_error = 0;

for(sig = 0; sig < RTrain.sample_number; sig++)
{
output_error = 0;
for(sigdim = 0; sigdim < RTrain.signal_dimensions; sigdim++)
{
for(cnode = 0; cnode < RBF_Design.maximum_number_of_clusters; cnode++)
{RBF_Design.node_in_cluster_layer[cnode].input_value[sigdim] = RTrain.number_of_samples[sig].data_in_sample[sigdim];}
}
RBF_Design.transfer_Gaussian_to_Output_layer();

for(outnode = 0; outnode < RBF_Design.number_of_output_units; outnode++)
{
RBF_Design.node_in_output_layer[outnode].calculate_output_error_information_term(RTrain.number_of_samples[sig].data_in_sample[RTrain.signal_dimensions + outnode], RBF_Design.activation_function);
// calculate the instantaneous sum of squared errors (Haykin, 1994)
real_error_difference = (pow(RBF_Design.node_in_output_layer[outnode].error_difference_squared, 0.5)) * (RTrain.max_output_value[outnode] - RTrain.min_output_value[outnode]);
output_error += 0.5 * pow(real_error_difference, 2.0);

// calculate maximum and mean absolute error difference for each node
real_error_difference = RBF_Design.node_in_output_layer[outnode].absolute_error_difference * (RTrain.max_output_value[outnode] - RTrain.min_output_value[outnode]);
meandifference[outnode] += real_error_difference / float(RTrain.sample_number);
if(sig == 0) {maxdifference[outnode] = real_error_difference;}
else
{
if(real_error_difference > maxdifference[outnode])
{maxdifference[outnode] = real_error_difference;}
}
}

// average squared error for each signal is saved
sum_of_error += output_error / float (RTrain.sample_number);

// update the RBF network's output nodes
for(outnode = 0; outnode < RBF_Design.number_of_output_units; outnode++)
{RBF_Design.node_in_output_layer[outnode].calculate_weight_and_bias_correction_terms(RTrain.rate_of_learning);}

} // end sig loop

// save error information (if required)
if(savefile == 'Y')
{
savefile_ptr << bepoch + 1 << " ";
savefile_ptr << sum_of_error << " ";
for(outnode = 0; outnode < RBF_Design.number_of_output_units; outnode++)
{savefile_ptr << maxdifference[outnode] << " " << meandifference[outnode] << " ";}
savefile_ptr << endl;
cout.width(6);
clrscr();
cout << "Epoch #"<< bepoch + 1 <<" is completed " << endl;
}

if(bepoch == 0)
{RTrain.minimum_average_squared_error = sum_of_error;}
else
{
if(sum_of_error < RTrain.minimum_average_squared_error)
{RTrain.minimum_average_squared_error = sum_of_error;}
}

for(outnode = 0; outnode < RBF_Design.number_of_output_units; outnode++)
{ maxdifference[outnode] = 0.0; meandifference[outnode] = 0.0;}

if(RTrain.minimum_average_squared_error <= target_minimum_average_squared_error)
{break;}

bepoch = bepoch + 1;

} while(bepoch < RTrain.number_of_epochs);

if(savefile == 'Y') {savefile_ptr.close();}

// delete arrays holding the training data
RTrain.delete_signal_data_array();
delete [] maxdifference;
delete [] meandifference;
}

void NeuralR::test_neural_network(int RBN)
{
float output_error, real_output;
int sig, sigdim, knodes, outnode;
int rbn = RBN;

for(int RBtest = 0; RBtest < number_of_tests; RBtest++)
{
RTests[RBtest].request_testing_data(rbn, RBtest + 1);

cout << "please enter the name of the file which will hold the results of test: " << RBtest + 1 << "/n";
cin >> RTests[RBtest].resultsname; cout << "/n";
ofstream savefile_ptr(RTests[RBtest].resultsname);

for(sig = 0; sig < RTests[RBtest].sample_number; sig++)
{
output_error = 0.0;
savefile_ptr << sig + 1 <<" ";
for(knodes = 0; knodes < RBF_Design.maximum_number_of_clusters; knodes++)
{
for(sigdim = 0; sigdim < RBF_Design.dimensions_of_signal; sigdim++)
{RBF_Design.node_in_cluster_layer[knodes].input_value[sigdim] = RTests[RBtest].number_of_samples[sig].data_in_sample[sigdim];}
}
RBF_Design.transfer_Gaussian_to_Output_layer();

// send target output to a file
for(outnode = 0; outnode < RBF_Design.number_of_output_units; outnode++)
{
real_output = RTests[RBtest].min_output_value[outnode] + (RTests[RBtest].number_of_samples[sig].data_in_sample[outnode + RBF_Design.dimensions_of_signal] * (RTests[RBtest].max_output_value[outnode] - RTests[RBtest].min_output_value[outnode]));
savefile_ptr << real_output << " ";
}

savefile_ptr <<" ";

// send network output to a file
for(outnode = 0; outnode < RBF_Design.number_of_output_units; outnode++)
{
RBF_Design.node_in_output_layer[outnode].calculate_output_error_information_term(RTests[RBtest].number_of_samples[sig].data_in_sample[RTests[RBtest].signal_dimensions + outnode], RBF_Design.activation_function);
real_output = RTests[RBtest].min_output_value[outnode] + (RBF_Design.node_in_output_layer[outnode].output_signal * (RTests[RBtest].max_output_value[outnode] - RTests[RBtest].min_output_value[outnode]));
savefile_ptr << real_output << " ";
}

// send absolute error difference to a file
for(outnode = 0; outnode < RBF_Design.number_of_output_units; outnode++)
{
real_output = (pow(RBF_Design.node_in_output_layer[outnode].error_difference_squared, 0.5)) * (RTests[RBtest].max_output_value[outnode] - RTests[RBtest].min_output_value[outnode]);
savefile_ptr << real_output << " ";
real_output = pow(real_output, 2.0);
output_error += 0.5 * real_output;
}
// save sum square of error
savefile_ptr << output_error << "\n";
if(sig == RTests[RBtest].sample_number - 1)
{savefile_ptr.close();}
}
RTests[RBtest].delete_signal_array();
}
} // end test neural network function

void NeuralR::network_training_testing(int TT)
{
int tt = TT;
int menu_choice;

clrscr();
cout << "/n/n/n/n";
cout << "**************** Operations Menu ****************" << "/n/n";
cout << " Please select one of the following options:" <<"/n/n";
cout << " 1. Train RBF network only " <<"/n/n";
cout << " 2. Test RBF network only " <<"/n/n";
cout << " 3. Train and Test RBF network" <<"/n/n";
cout << "*************************************************" << "/n/n";
cout << " Your choice?: "; cin >> menu_choice;
cout << "/n/n";

switch(menu_choice)
{
case 1:
initialize_training_storage_array(tt);
train_RBF_neural_network(tt);
break;

case 2:
establish_test_battery_size();
if(number_of_tests > 0)
{test_neural_network(tt);}
break;

case 3:
initialize_training_storage_array(tt);
train_RBF_neural_network(tt);
establish_test_battery_size();
if(number_of_tests > 0)
{test_neural_network(tt);}
break;

default:network_training_testing(tt);
}
}
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
// this template class stores the neural networks to a file

template <class Type>
class Storage
{
public:
void save_neural_network(Type & NET_Topology);
};

template <class Type>
void Storage<Type>::save_neural_network(Type & NET_Topology)
{
char schoice;
int dolock = 0;

do
{
clrscr();
cout << "/n/n/n/n";
cout << "Do you wish to save this neural network? (Y/N): ";
cin >> schoice;
schoice = toupper(schoice);
if((schoice == 'Y') || (schoice == 'N')) {dolock = 1;}
} while(dolock <= 0);
if(schoice == 'Y')
{NET_Topology.savenet();}
}
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

class Neural_Window // this class holds the different types of neural nets
{
private:
void establish_network_type(void);

public: // user interface
char neural_network_type;
int neural_network_number;
void display_menu_for_net_selection(int NNnum);
};

void Neural_Window::display_menu_for_net_selection(int NNnum)
{
clrscr();
neural_network_number = NNnum;
cout.fill('*');
cout.width(70); cout << "/n";
cout.width(42);
cout << " Neural Network " << neural_network_number << " ";
cout.width(26); cout << "/n";
cout.width(71); cout << "/n/n";
cout << "Please select one of the following network types from the Main Menu";
int i = 0;
do {cout << "/n"; i = i + 1;} while (i < 3);
cout.fill(' ');
cout.width(10);
cout << " *** / Main Menu // ***"; cout << "/n/n";
cout.width(6);
cout << " F. Feedforward network using backpropagation " << "/n/n";
cout.width(6);
cout << " A. Adaptive Resonance Theory network for binary signals " <<"/n/n";
cout.width(6);
cout << " K. Kohonen Self-Organizing Map " <<"/n/n";
cout.width(6);
cout << " R. Radial Basis Function Network " <<"/n/n";
cout.width(6);
cout << " E. Exit Program" <<"/n/n";
cout << "/n/n/n";
cout.width(6);
cout << "Network Type (?) "; cin >> neural_network_type;
neural_network_type = toupper(neural_network_type);
if(neural_network_type != 'E')
{establish_network_type();}
}

void Neural_Window::establish_network_type(void)
{
int NNN = neural_network_number;

NeuralA *ART;
NeuralB *Backpropagation;
NeuralK *KOH;
NeuralR *RBF;

switch(neural_network_type)
{
case 'A': // Adaptive Resonance Theory Network (ART1) for clustering
{ // each case is braced so its Storage object is scoped to that branch
ART = new NeuralA;
Storage<ART_Topology> Astore;
ART->construct_ART_network();
ART->network_training_testing(NNN);
Astore.save_neural_network(ART->ART_Design);
delete ART; // free the network once it has been saved
break;
}

case 'F': // Feedforward Network Using Backpropagation
{
Backpropagation = new NeuralB;
Storage<Back_Topology> Bstore;
Backpropagation->establish_backprop_network();
Backpropagation->network_training_testing(NNN);
Bstore.save_neural_network(Backpropagation->Net_Design);
delete Backpropagation;
break;
}

case 'K': // Kohonen Self-Organizing Map
{
KOH = new NeuralK;
Storage<Kohonen_Topology> Kstore;
KOH->construct_Kohonen_network();
KOH->network_training_testing(NNN);
Kstore.save_neural_network(KOH->Kohonen_Design);
delete KOH;
break;
}

case 'R': // Radial Basis Function Network
{
RBF = new NeuralR;
Storage<Radial_Basis_Topology> Rstore;
RBF->establish_Radial_Basis_network();
RBF->network_training_testing(NNN);
Rstore.save_neural_network(RBF->RBF_Design);
delete RBF;
break;
}

default: display_menu_for_net_selection(neural_network_number);

}

}

//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

int main(void)
{
_control87(MCW_EM, MCW_EM); // will mask floating point overflows,
// underflows, or divisions by 0

int number_of_nets;
Neural_Window User_net;
clrscr();
cout << " ******* Welcome to Pitt-Networks!! ******** " << "/n/n/n/a";
cout << "Please enter the number of networks you wish to develop: "; cin >> number_of_nets;


for(int NWnet = 1; NWnet < number_of_nets + 1; NWnet++)
{
User_net.display_menu_for_net_selection(NWnet);
if(User_net.neural_network_type == 'E')
{break;}
}

return 0;
}
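For readers who want to experiment with the mathematics in isolation, here is a minimal standalone sketch of the RBF forward pass described above. It is independent of the pittnet classes, and the centers, widths, weights, and input are made-up values chosen purely for illustration:

#include <cmath>
#include <cstdio>

// Forward pass of a tiny RBF network:
// phi_j = exp( -(||x - c_j|| / sigma_j)^2 ), y = sum_j v_j * phi_j + b
int main(void)
{
const int P = 2; // number of cluster (hidden) units
const int D = 2; // input dimensions
float center[2][2] = {{0.0f, 0.0f}, {1.0f, 1.0f}}; // hypothetical centers
float width[2] = {1.0f, 1.0f}; // hypothetical transfer-function widths
float v[2] = {0.5f, -0.3f}; // hypothetical output weights
float b = 0.1f; // hypothetical output bias
float x[2] = {0.2f, 0.8f}; // sample input signal

float y = b;
for(int j = 0; j < P; j++)
{
float dist2 = 0.0f; // squared Euclidean distance ||x - c_j||^2
for(int k = 0; k < D; k++)
{dist2 += (x[k] - center[j][k]) * (x[k] - center[j][k]);}
float phi = (float) std::exp(-(dist2 / (width[j] * width[j]))); // Gaussian response
y += v[j] * phi; // weighted contribution to the output
}
printf("network output: %f\n", y);
return 0;
}

Compiled with any standard C++ compiler, the sketch prints the scalar network output for the sample input; it mirrors calculate_sum_square_Euclidean_distance(), execute_Gaussian_transfer_function(), and transfer_Gaussian_to_Output_layer() in the listing above.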

