2 Fast Artificial Neural Network Library (fann)
3 Copyright (C) 2003 Steffen Nissen (lukesky@diku.dk)
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with this library; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 /* Create a network from a configuration file.
30 FANN_EXTERNAL struct fann *FANN_API fann_create_from_file(const char *configuration_file)
33 FILE *conf = fopen(configuration_file, "r");
37 fann_error(NULL, FANN_E_CANT_OPEN_CONFIG_R, configuration_file);
40 ann = fann_create_from_fd(conf, configuration_file);
47 FANN_EXTERNAL int FANN_API fann_save(struct fann *ann, const char *configuration_file)
49 return fann_save_internal(ann, configuration_file, 0);
52 /* Save the network as fixed point data.
54 FANN_EXTERNAL int FANN_API fann_save_to_fixed(struct fann *ann, const char *configuration_file)
56 return fann_save_internal(ann, configuration_file, 1);
60 Used to save the network to a file.
62 int fann_save_internal(struct fann *ann, const char *configuration_file, unsigned int save_as_fixed)
65 FILE *conf = fopen(configuration_file, "w+");
69 fann_error((struct fann_error *) ann, FANN_E_CANT_OPEN_CONFIG_W, configuration_file);
72 retval = fann_save_internal_fd(ann, conf, configuration_file, save_as_fixed);
/* INTERNAL FUNCTION
 * Used to save the network to an already-opened file descriptor.
 *
 * NOTE(review): this chunk is an excerpt of the original file -- the brace
 * lines and the #ifdef FIXEDFANN / save_as_fixed preprocessor branches that
 * select between fixed-point and floating-point output are not visible here,
 * and each line carries a stray leading line number from the extraction.
 * Only comments are changed below; every code token is left untouched.
 *
 * Returns calculated_decimal_point (0 unless a fixed-point shift was
 * computed for a save_as_fixed run).
 */
78 Used to save the network to a file descriptor.
80 int fann_save_internal_fd(struct fann *ann, FILE * conf, const char *configuration_file,
81 unsigned int save_as_fixed)
83 struct fann_layer *layer_it;
84 int calculated_decimal_point = 0;
85 struct fann_neuron *neuron_it, *first_neuron;
87 struct fann_neuron **connected_neurons;
91 /* variables for use when saving floats as fixed point variables */
92 unsigned int decimal_point = 0;
93 unsigned int fixed_multiplier = 0;
94 fann_type max_possible_value = 0;
95 unsigned int bits_used_for_max = 0;
96 fann_type current_max_value = 0;
/* NOTE(review): the three version fprintf's below belong to mutually
 * exclusive preprocessor / save_as_fixed branches in the full source --
 * exactly one of them runs per save. */
102 /* save the version information */
103 fprintf(conf, FANN_FIX_VERSION "\n");
107 /* save the version information */
108 fprintf(conf, FANN_FLO_VERSION "\n");
111 /* save the version information */
112 fprintf(conf, FANN_FIX_VERSION "\n");
/* Fixed-point preparation: find the largest possible accumulated weight sum
 * so we know how many integer bits must be reserved. */
118 /* calculate the maximal possible shift value */
120 for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
122 for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++)
124 /* look at all connections to each neuron, and see how high a value we can get */
125 current_max_value = 0;
126 for(i = neuron_it->first_con; i != neuron_it->last_con; i++)
128 current_max_value += fann_abs(ann->weights[i]);
131 if(current_max_value > max_possible_value)
133 max_possible_value = current_max_value;
/* count how many bits the integer part of max_possible_value needs */
138 for(bits_used_for_max = 0; max_possible_value >= 1; bits_used_for_max++)
140 max_possible_value /= 2.0;
143 /* The maximum number of bits we shift the fix point, is the number
144 * of bits in an integer, minus one for the sign, one for the minus
145 * in stepwise, and minus the bits used for the maximum.
146 * This is divided by two, to allow multiplication of two fixed
149 calculated_decimal_point = (sizeof(int) * 8 - 2 - bits_used_for_max) / 2;
/* NOTE(review): clamping of a negative calculated_decimal_point happens in
 * lines not visible in this excerpt. */
151 if(calculated_decimal_point < 0)
157 decimal_point = calculated_decimal_point;
160 fixed_multiplier = 1 << decimal_point;
163 printf("calculated_decimal_point=%d, decimal_point=%u, bits_used_for_max=%u\n",
164 calculated_decimal_point, decimal_point, bits_used_for_max);
/* NOTE(review): the two decimal_point fprintf's below are alternatives --
 * freshly calculated value for save_as_fixed, stored ann->decimal_point
 * for an already-fixed network. */
167 /* save the decimal_point on a separate line */
168 fprintf(conf, "decimal_point=%u\n", decimal_point);
171 /* save the decimal_point on a separate line */
172 fprintf(conf, "decimal_point=%u\n", ann->decimal_point);
176 /* Save network parameters */
177 fprintf(conf, "num_layers=%u\n", ann->last_layer - ann->first_layer);
178 fprintf(conf, "learning_rate=%f\n", ann->learning_rate);
179 fprintf(conf, "connection_rate=%f\n", ann->connection_rate);
180 fprintf(conf, "shortcut_connections=%u\n", ann->shortcut_connections);
182 fprintf(conf, "learning_momentum=%f\n", ann->learning_momentum);
183 fprintf(conf, "training_algorithm=%u\n", ann->training_algorithm);
184 fprintf(conf, "train_error_function=%u\n", ann->train_error_function);
185 fprintf(conf, "train_stop_function=%u\n", ann->train_stop_function);
186 fprintf(conf, "cascade_output_change_fraction=%f\n", ann->cascade_output_change_fraction);
187 fprintf(conf, "quickprop_decay=%f\n", ann->quickprop_decay);
188 fprintf(conf, "quickprop_mu=%f\n", ann->quickprop_mu);
189 fprintf(conf, "rprop_increase_factor=%f\n", ann->rprop_increase_factor);
190 fprintf(conf, "rprop_decrease_factor=%f\n", ann->rprop_decrease_factor);
191 fprintf(conf, "rprop_delta_min=%f\n", ann->rprop_delta_min);
192 fprintf(conf, "rprop_delta_max=%f\n", ann->rprop_delta_max);
193 fprintf(conf, "rprop_delta_zero=%f\n", ann->rprop_delta_zero);
194 fprintf(conf, "cascade_output_stagnation_epochs=%u\n", ann->cascade_output_stagnation_epochs);
195 fprintf(conf, "cascade_candidate_change_fraction=%f\n", ann->cascade_candidate_change_fraction);
196 fprintf(conf, "cascade_candidate_stagnation_epochs=%u\n", ann->cascade_candidate_stagnation_epochs);
197 fprintf(conf, "cascade_max_out_epochs=%u\n", ann->cascade_max_out_epochs);
198 fprintf(conf, "cascade_max_cand_epochs=%u\n", ann->cascade_max_cand_epochs);
199 fprintf(conf, "cascade_num_candidate_groups=%u\n", ann->cascade_num_candidate_groups);
/* Fixed-point branch: scale floating-point limits by fixed_multiplier and
 * round to the nearest integer before writing. */
204 fprintf(conf, "bit_fail_limit=%u\n", (int) floor((ann->bit_fail_limit * fixed_multiplier) + 0.5));
205 fprintf(conf, "cascade_candidate_limit=%u\n", (int) floor((ann->cascade_candidate_limit * fixed_multiplier) + 0.5));
206 fprintf(conf, "cascade_weight_multiplier=%u\n", (int) floor((ann->cascade_weight_multiplier * fixed_multiplier) + 0.5));
/* Floating-point branch: write the same limits with the native format. */
211 fprintf(conf, "bit_fail_limit="FANNPRINTF"\n", ann->bit_fail_limit);
212 fprintf(conf, "cascade_candidate_limit="FANNPRINTF"\n", ann->cascade_candidate_limit);
213 fprintf(conf, "cascade_weight_multiplier="FANNPRINTF"\n", ann->cascade_weight_multiplier);
216 fprintf(conf, "cascade_activation_functions_count=%u\n", ann->cascade_activation_functions_count);
217 fprintf(conf, "cascade_activation_functions=");
218 for(i = 0; i < ann->cascade_activation_functions_count; i++)
219 fprintf(conf, "%u ", ann->cascade_activation_functions[i]);
222 fprintf(conf, "cascade_activation_steepnesses_count=%u\n", ann->cascade_activation_steepnesses_count);
223 fprintf(conf, "cascade_activation_steepnesses=");
224 for(i = 0; i < ann->cascade_activation_steepnesses_count; i++)
/* fixed-point vs floating-point steepness output (alternative branches) */
228 fprintf(conf, "%u ", (int) floor((ann->cascade_activation_steepnesses[i] * fixed_multiplier) + 0.5));
231 fprintf(conf, FANNPRINTF" ", ann->cascade_activation_steepnesses[i]);
235 fprintf(conf, "layer_sizes=");
236 for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
238 /* the number of neurons in the layers (in the last layer, there is always one too many neurons, because of an unused bias) */
239 fprintf(conf, "%u ", layer_it->last_neuron - layer_it->first_neuron);
244 fprintf(conf, "neurons (num_inputs, activation_function, activation_steepness)=");
245 for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
248 for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++)
/* NOTE(review): the three per-neuron fprintf's below are alternative
 * branches (fixed scaled, already-fixed, floating point) -- the selecting
 * #ifdef / if lines are not visible in this excerpt. */
253 fprintf(conf, "(%u, %u, %u) ", neuron_it->last_con - neuron_it->first_con,
254 neuron_it->activation_function,
255 (int) floor((neuron_it->activation_steepness * fixed_multiplier) + 0.5));
259 fprintf(conf, "(%u, %u, " FANNPRINTF ") ", neuron_it->last_con - neuron_it->first_con,
260 neuron_it->activation_function, neuron_it->activation_steepness);
263 fprintf(conf, "(%u, %u, " FANNPRINTF ") ", neuron_it->last_con - neuron_it->first_con,
264 neuron_it->activation_function, neuron_it->activation_steepness);
270 connected_neurons = ann->connections;
271 weights = ann->weights;
272 first_neuron = ann->first_layer->first_neuron;
274 /* Now save all the connections.
275 * We only need to save the source and the weight,
276 * since the destination is given by the order.
278 * The weight is not saved binary due to differences
279 * in binary definition of floating point numbers.
280 * Especially an iPAQ does not use the same binary
281 * representation as an i386 machine.
283 fprintf(conf, "connections (connected_to_neuron, weight)=");
284 for(i = 0; i < ann->total_connections; i++)
/* per-connection output, again one of three alternative branches */
289 /* save the connection "(source weight) " */
290 fprintf(conf, "(%u, %d) ",
291 connected_neurons[i] - first_neuron,
292 (int) floor((weights[i] * fixed_multiplier) + 0.5))
296 /* save the connection "(source weight) " */
297 fprintf(conf, "(%u, " FANNPRINTF ") ", connected_neurons[i] - first_neuron, weights[i]);
300 /* save the connection "(source weight) " */
301 fprintf(conf, "(%u, " FANNPRINTF ") ", connected_neurons[i] - first_neuron, weights[i]);
307 return calculated_decimal_point;
/* Forward declaration: backward-compatible reader for version 1.1
 * configuration files, defined further down in this file. */
310 struct fann *fann_create_from_fd_1_1(FILE * conf, const char *configuration_file);
/* Helper macro: reads one "name=value" line from the stream `conf` (captured
 * from the enclosing scope, as are `configuration_file` and the error path)
 * and reports FANN_E_CANT_READ_CONFIG on a scan mismatch.
 * NOTE(review): the do { ... } while(0) wrapper and the error-path cleanup
 * visible in the full source are not shown in this excerpt. */
312 #define fann_scanf(type, name, val) \
314 if(fscanf(conf, name"="type"\n", val) != 1) \
316 fann_error(NULL, FANN_E_CANT_READ_CONFIG, name, configuration_file); \
/* INTERNAL FUNCTION
 * Create a network from a configuration file descriptor.
 *
 * NOTE(review): this chunk is an excerpt of the original file -- brace lines,
 * #ifdef FIXEDFANN branches, error-path returns and the final `return ann;`
 * are not visible here, and each line carries a stray leading line number
 * from the extraction.  Only comments are added; code tokens are untouched.
 *
 * Reads the version header, dispatches 1.1-format files to
 * fann_create_from_fd_1_1(), then parses parameters, layer sizes, neurons
 * and connections into a freshly allocated struct fann.
 */
323 Create a network from a configuration file descriptor.
325 struct fann *fann_create_from_fd(FILE * conf, const char *configuration_file)
327 unsigned int num_layers, layer_size, input_neuron, i, num_connections;
330 unsigned int decimal_point, multiplier;
332 struct fann_neuron *first_neuron, *neuron_it, *last_neuron, **connected_neurons;
334 struct fann_layer *layer_it;
335 struct fann *ann = NULL;
/* read the version string (same length as FANN_CONF_VERSION plus newline);
 * NOTE(review): calloc of strlen bytes leaves no room for a NUL terminator --
 * the code relies on strncmp with an explicit length below. */
339 read_version = (char *) calloc(strlen(FANN_CONF_VERSION "\n"), 1);
340 if(read_version == NULL)
342 fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
346 fread(read_version, 1, strlen(FANN_CONF_VERSION "\n"), conf); /* reads version */
348 /* compares the version information */
349 if(strncmp(read_version, FANN_CONF_VERSION "\n", strlen(FANN_CONF_VERSION "\n")) != 0)
/* old 1.1-format files are handled by the backward-compatible reader */
352 if(strncmp(read_version, "FANN_FIX_1.1\n", strlen("FANN_FIX_1.1\n")) == 0)
355 if(strncmp(read_version, "FANN_FLO_1.1\n", strlen("FANN_FLO_1.1\n")) == 0)
359 return fann_create_from_fd_1_1(conf, configuration_file);
363 fann_error(NULL, FANN_E_WRONG_CONFIG_VERSION, configuration_file);
/* fixed-point builds read the stored decimal point first (#ifdef FIXEDFANN) */
371 fann_scanf("%u", "decimal_point", &decimal_point);
372 multiplier = 1 << decimal_point;
375 fann_scanf("%u", "num_layers", &num_layers);
377 ann = fann_allocate_structure(num_layers);
/* parse the scalar network parameters in the exact order fann_save wrote them */
383 fann_scanf("%f", "learning_rate", &ann->learning_rate);
384 fann_scanf("%f", "connection_rate", &ann->connection_rate);
385 fann_scanf("%u", "shortcut_connections", &ann->shortcut_connections);
386 fann_scanf("%f", "learning_momentum", &ann->learning_momentum);
387 fann_scanf("%u", "training_algorithm", (unsigned int *)&ann->training_algorithm);
388 fann_scanf("%u", "train_error_function", (unsigned int *)&ann->train_error_function);
389 fann_scanf("%u", "train_stop_function", (unsigned int *)&ann->train_stop_function);
390 fann_scanf("%f", "cascade_output_change_fraction", &ann->cascade_output_change_fraction);
391 fann_scanf("%f", "quickprop_decay", &ann->quickprop_decay);
392 fann_scanf("%f", "quickprop_mu", &ann->quickprop_mu);
393 fann_scanf("%f", "rprop_increase_factor", &ann->rprop_increase_factor);
394 fann_scanf("%f", "rprop_decrease_factor", &ann->rprop_decrease_factor);
395 fann_scanf("%f", "rprop_delta_min", &ann->rprop_delta_min);
396 fann_scanf("%f", "rprop_delta_max", &ann->rprop_delta_max);
397 fann_scanf("%f", "rprop_delta_zero", &ann->rprop_delta_zero);
398 fann_scanf("%u", "cascade_output_stagnation_epochs", &ann->cascade_output_stagnation_epochs);
399 fann_scanf("%f", "cascade_candidate_change_fraction", &ann->cascade_candidate_change_fraction);
400 fann_scanf("%u", "cascade_candidate_stagnation_epochs", &ann->cascade_candidate_stagnation_epochs);
401 fann_scanf("%u", "cascade_max_out_epochs", &ann->cascade_max_out_epochs);
402 fann_scanf("%u", "cascade_max_cand_epochs", &ann->cascade_max_cand_epochs);
403 fann_scanf("%u", "cascade_num_candidate_groups", &ann->cascade_num_candidate_groups);
405 fann_scanf(FANNSCANF, "bit_fail_limit", &ann->bit_fail_limit);
406 fann_scanf(FANNSCANF, "cascade_candidate_limit", &ann->cascade_candidate_limit);
407 fann_scanf(FANNSCANF, "cascade_weight_multiplier", &ann->cascade_weight_multiplier);
410 fann_scanf("%u", "cascade_activation_functions_count", &ann->cascade_activation_functions_count);
/* grow the cascade activation-function table to the stored count */
413 ann->cascade_activation_functions =
414 (enum fann_activationfunc_enum *)realloc(ann->cascade_activation_functions,
415 ann->cascade_activation_functions_count * sizeof(enum fann_activationfunc_enum));
416 if(ann->cascade_activation_functions == NULL)
418 fann_error((struct fann_error*)ann, FANN_E_CANT_ALLOCATE_MEM);
/* NOTE(review): fscanf return values are ignored for the list reads below --
 * a truncated file would go undetected here. */
423 fscanf(conf, "cascade_activation_functions=");
424 for(i = 0; i < ann->cascade_activation_functions_count; i++)
425 fscanf(conf, "%u ", (unsigned int *)&ann->cascade_activation_functions[i]);
427 fann_scanf("%u", "cascade_activation_steepnesses_count", &ann->cascade_activation_steepnesses_count);
430 ann->cascade_activation_steepnesses =
431 (fann_type *)realloc(ann->cascade_activation_steepnesses,
432 ann->cascade_activation_steepnesses_count * sizeof(fann_type));
433 if(ann->cascade_activation_steepnesses == NULL)
435 fann_error((struct fann_error*)ann, FANN_E_CANT_ALLOCATE_MEM);
440 fscanf(conf, "cascade_activation_steepnesses=");
441 for(i = 0; i < ann->cascade_activation_steepnesses_count; i++)
442 fscanf(conf, FANNSCANF" ", &ann->cascade_activation_steepnesses[i]);
/* fixed-point builds keep the decimal point/multiplier and rebuild the
 * stepwise activation tables (#ifdef FIXEDFANN in the full source) */
445 ann->decimal_point = decimal_point;
446 ann->multiplier = multiplier;
450 fann_update_stepwise(ann);
454 printf("creating network with %d layers\n", num_layers);
458 fscanf(conf, "layer_sizes=");
459 /* determine how many neurons there should be in each layer */
460 for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
462 if(fscanf(conf, "%u ", &layer_size) != 1)
464 fann_error((struct fann_error *) ann, FANN_E_CANT_READ_CONFIG, "layer_sizes", configuration_file);
468 /* we do not allocate room here, but we make sure that
469 * last_neuron - first_neuron is the number of neurons */
470 layer_it->first_neuron = NULL;
471 layer_it->last_neuron = layer_it->first_neuron + layer_size;
472 ann->total_neurons += layer_size;
474 if(ann->shortcut_connections && layer_it != ann->first_layer)
476 printf("  layer       : %d neurons, 0 bias\n", layer_size);
480 printf("  layer       : %d neurons, 1 bias\n", layer_size - 1);
/* derive input/output counts from the first and last layer sizes */
485 ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;
486 ann->num_output = ((ann->last_layer - 1)->last_neuron - (ann->last_layer - 1)->first_neuron);
487 if(!ann->shortcut_connections)
489 /* one too many (bias) in the output layer */
493 /* allocate room for the actual neurons */
494 fann_allocate_neurons(ann);
495 if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
/* read per-neuron connection counts, activation function and steepness */
501 last_neuron = (ann->last_layer - 1)->last_neuron;
502 fscanf(conf, "neurons (num_inputs, activation_function, activation_steepness)=");
503 for(neuron_it = ann->first_layer->first_neuron; neuron_it != last_neuron; neuron_it++)
506 (conf, "(%u, %u, " FANNSCANF ") ", &num_connections, (unsigned int *)&neuron_it->activation_function,
507 &neuron_it->activation_steepness) != 3)
509 fann_error((struct fann_error *) ann, FANN_E_CANT_READ_NEURON, configuration_file);
/* connection indices are laid out contiguously, in neuron order */
513 neuron_it->first_con = ann->total_connections;
514 ann->total_connections += num_connections;
515 neuron_it->last_con = ann->total_connections;
518 fann_allocate_connections(ann);
519 if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
/* finally read the (source neuron, weight) pairs for every connection */
525 connected_neurons = ann->connections;
526 weights = ann->weights;
527 first_neuron = ann->first_layer->first_neuron;
529 fscanf(conf, "connections (connected_to_neuron, weight)=");
530 for(i = 0; i < ann->total_connections; i++)
532 if(fscanf(conf, "(%u, " FANNSCANF ") ", &input_neuron, &weights[i]) != 2)
534 fann_error((struct fann_error *) ann, FANN_E_CANT_READ_CONNECTIONS, configuration_file);
538 connected_neurons[i] = first_neuron + input_neuron;
549 Create a network from a configuration file descriptor. (backward compatible read of version 1.1 files)
551 struct fann *fann_create_from_fd_1_1(FILE * conf, const char *configuration_file)
553 unsigned int num_layers, layer_size, input_neuron, i, shortcut_connections, num_connections;
554 unsigned int activation_function_hidden, activation_function_output;
556 unsigned int decimal_point, multiplier;
558 fann_type activation_steepness_hidden, activation_steepness_output;
559 float learning_rate, connection_rate;
560 struct fann_neuron *first_neuron, *neuron_it, *last_neuron, **connected_neurons;
562 struct fann_layer *layer_it;
566 if(fscanf(conf, "%u\n", &decimal_point) != 1)
568 fann_error(NULL, FANN_E_CANT_READ_CONFIG, "decimal_point", configuration_file);
571 multiplier = 1 << decimal_point;
574 if(fscanf(conf, "%u %f %f %u %u %u " FANNSCANF " " FANNSCANF "\n", &num_layers, &learning_rate,
575 &connection_rate, &shortcut_connections, &activation_function_hidden,
576 &activation_function_output, &activation_steepness_hidden,
577 &activation_steepness_output) != 8)
579 fann_error(NULL, FANN_E_CANT_READ_CONFIG, "parameters", configuration_file);
583 ann = fann_allocate_structure(num_layers);
588 ann->connection_rate = connection_rate;
589 ann->shortcut_connections = shortcut_connections;
590 ann->learning_rate = learning_rate;
593 ann->decimal_point = decimal_point;
594 ann->multiplier = multiplier;
598 fann_update_stepwise(ann);
602 printf("creating network with learning rate %f\n", learning_rate);
606 /* determine how many neurons there should be in each layer */
607 for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
609 if(fscanf(conf, "%u ", &layer_size) != 1)
611 fann_error((struct fann_error *) ann, FANN_E_CANT_READ_NEURON, configuration_file);
615 /* we do not allocate room here, but we make sure that
616 * last_neuron - first_neuron is the number of neurons */
617 layer_it->first_neuron = NULL;
618 layer_it->last_neuron = layer_it->first_neuron + layer_size;
619 ann->total_neurons += layer_size;
621 if(ann->shortcut_connections && layer_it != ann->first_layer)
623 printf(" layer : %d neurons, 0 bias\n", layer_size);
627 printf(" layer : %d neurons, 1 bias\n", layer_size - 1);
632 ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;
633 ann->num_output = ((ann->last_layer - 1)->last_neuron - (ann->last_layer - 1)->first_neuron);
634 if(!ann->shortcut_connections)
636 /* one too many (bias) in the output layer */
640 /* allocate room for the actual neurons */
641 fann_allocate_neurons(ann);
642 if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
648 last_neuron = (ann->last_layer - 1)->last_neuron;
649 for(neuron_it = ann->first_layer->first_neuron; neuron_it != last_neuron; neuron_it++)
651 if(fscanf(conf, "%u ", &num_connections) != 1)
653 fann_error((struct fann_error *) ann, FANN_E_CANT_READ_NEURON, configuration_file);
657 neuron_it->first_con = ann->total_connections;
658 ann->total_connections += num_connections;
659 neuron_it->last_con = ann->total_connections;
662 fann_allocate_connections(ann);
663 if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
669 connected_neurons = ann->connections;
670 weights = ann->weights;
671 first_neuron = ann->first_layer->first_neuron;
673 for(i = 0; i < ann->total_connections; i++)
675 if(fscanf(conf, "(%u " FANNSCANF ") ", &input_neuron, &weights[i]) != 2)
677 fann_error((struct fann_error *) ann, FANN_E_CANT_READ_CONNECTIONS, configuration_file);
681 connected_neurons[i] = first_neuron + input_neuron;
684 fann_set_activation_steepness_hidden(ann, activation_steepness_hidden);
685 fann_set_activation_steepness_output(ann, activation_steepness_output);
686 fann_set_activation_function_hidden(ann, (enum fann_activationfunc_enum)activation_function_hidden);
687 fann_set_activation_function_output(ann, (enum fann_activationfunc_enum)activation_function_output);