2 Fast Artificial Neural Network Library (fann)
3 Copyright (C) 2003 Steffen Nissen (lukesky@diku.dk)
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with this library; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 /*#define DEBUGTRAIN*/
32 Calculates the derived of a value, given an activation function
35 fann_type fann_activation_derived(unsigned int activation_function,
36 fann_type steepness, fann_type value, fann_type sum)
38 switch (activation_function)
41 case FANN_LINEAR_PIECE:
42 case FANN_LINEAR_PIECE_SYMMETRIC:
43 return (fann_type) fann_linear_derive(steepness, value);
45 case FANN_SIGMOID_STEPWISE:
46 value = fann_clip(value, 0.01f, 0.99f);
47 return (fann_type) fann_sigmoid_derive(steepness, value);
48 case FANN_SIGMOID_SYMMETRIC:
49 case FANN_SIGMOID_SYMMETRIC_STEPWISE:
50 value = fann_clip(value, -0.98f, 0.98f);
51 return (fann_type) fann_sigmoid_symmetric_derive(steepness, value);
53 value = fann_clip(value, 0.01f, 0.99f);
54 return (fann_type) fann_gaussian_derive(steepness, value, sum);
55 case FANN_GAUSSIAN_SYMMETRIC:
56 value = fann_clip(value, -0.98f, 0.98f);
57 return (fann_type) fann_gaussian_symmetric_derive(steepness, value, sum);
59 value = fann_clip(value, 0.01f, 0.99f);
60 return (fann_type) fann_elliot_derive(steepness, value, sum);
61 case FANN_ELLIOT_SYMMETRIC:
62 value = fann_clip(value, -0.98f, 0.98f);
63 return (fann_type) fann_elliot_symmetric_derive(steepness, value, sum);
65 fann_error(NULL, FANN_E_CANT_TRAIN_ACTIVATION);
71 Calculates the activation of a value, given an activation function
74 fann_type fann_activation(struct fann * ann, unsigned int activation_function, fann_type steepness,
77 value = fann_mult(steepness, value);
78 fann_activation_switch(ann, activation_function, value, value);
82 /* Trains the network with the backpropagation algorithm.
84 FANN_EXTERNAL void FANN_API fann_train(struct fann *ann, fann_type * input,
85 fann_type * desired_output)
89 fann_compute_MSE(ann, desired_output);
91 fann_backpropagate_MSE(ann);
93 fann_update_weights(ann);
99 Helper function to update the MSE value and return a diff which takes symmetric functions into account
101 fann_type fann_update_MSE(struct fann *ann, struct fann_neuron* neuron, fann_type neuron_diff)
105 switch (neuron->activation_function)
107 case FANN_LINEAR_PIECE_SYMMETRIC:
108 case FANN_THRESHOLD_SYMMETRIC:
109 case FANN_SIGMOID_SYMMETRIC:
110 case FANN_SIGMOID_SYMMETRIC_STEPWISE:
111 case FANN_ELLIOT_SYMMETRIC:
112 case FANN_GAUSSIAN_SYMMETRIC:
113 neuron_diff /= (fann_type)2.0;
118 case FANN_SIGMOID_STEPWISE:
120 case FANN_GAUSSIAN_STEPWISE:
122 case FANN_LINEAR_PIECE:
128 (neuron_diff / (float) ann->multiplier) * (neuron_diff / (float) ann->multiplier);
130 neuron_diff2 = (float) (neuron_diff * neuron_diff);
133 ann->MSE_value += neuron_diff2;
135 /*printf("neuron_diff %f = (%f - %f)[/2], neuron_diff2=%f, sum=%f, MSE_value=%f, num_MSE=%d\n", neuron_diff, *desired_output, neuron_value, neuron_diff2, last_layer_begin->sum, ann->MSE_value, ann->num_MSE); */
136 if(fann_abs(neuron_diff) >= ann->bit_fail_limit)
144 /* Tests the network.
146 FANN_EXTERNAL fann_type *FANN_API fann_test(struct fann *ann, fann_type * input,
147 fann_type * desired_output)
149 fann_type neuron_value;
150 fann_type *output_begin = fann_run(ann, input);
151 fann_type *output_it;
152 const fann_type *output_end = output_begin + ann->num_output;
153 fann_type neuron_diff;
154 struct fann_neuron *output_neuron = (ann->last_layer - 1)->first_neuron;
156 /* calculate the error */
157 for(output_it = output_begin; output_it != output_end; output_it++)
159 neuron_value = *output_it;
161 neuron_diff = (*desired_output - neuron_value);
163 neuron_diff = fann_update_MSE(ann, output_neuron, neuron_diff);
173 /* get the mean square error.
175 FANN_EXTERNAL float FANN_API fann_get_MSE(struct fann *ann)
179 return ann->MSE_value / (float) ann->num_MSE;
187 FANN_EXTERNAL unsigned int fann_get_bit_fail(struct fann *ann)
189 return ann->num_bit_fail;
192 /* reset the mean square error.
194 FANN_EXTERNAL void FANN_API fann_reset_MSE(struct fann *ann)
198 ann->num_bit_fail = 0;
204 compute the error at the network output
205 (usually, after forward propagation of a certain input vector, fann_run)
206 the error is a sum of squares for all the output units
207 also increments a counter because MSE is an average of such errors
209 After this train_errors in the output layer will be set to:
210 neuron_value_derived * (desired_output - neuron_value)
212 void fann_compute_MSE(struct fann *ann, fann_type * desired_output)
214 fann_type neuron_value, neuron_diff, *error_it = 0, *error_begin = 0;
215 struct fann_neuron *last_layer_begin = (ann->last_layer - 1)->first_neuron;
216 const struct fann_neuron *last_layer_end = last_layer_begin + ann->num_output;
217 const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
219 /* if no room allocated for the error variabels, allocate it now */
220 if(ann->train_errors == NULL)
222 ann->train_errors = (fann_type *) calloc(ann->total_neurons, sizeof(fann_type));
223 if(ann->train_errors == NULL)
225 fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
231 /* clear the error variabels */
232 memset(ann->train_errors, 0, (ann->total_neurons) * sizeof(fann_type));
234 error_begin = ann->train_errors;
237 printf("\ncalculate errors\n");
239 /* calculate the error and place it in the output layer */
240 error_it = error_begin + (last_layer_begin - first_neuron);
242 for(; last_layer_begin != last_layer_end; last_layer_begin++)
244 neuron_value = last_layer_begin->value;
245 neuron_diff = *desired_output - neuron_value;
247 neuron_diff = fann_update_MSE(ann, last_layer_begin, neuron_diff);
249 if(ann->train_error_function)
250 { /* TODO make switch when more functions */
251 if(neuron_diff < -.9999999)
253 else if(neuron_diff > .9999999)
256 neuron_diff = (fann_type) log((1.0 + neuron_diff) / (1.0 - neuron_diff));
259 *error_it = fann_activation_derived(last_layer_begin->activation_function,
260 last_layer_begin->activation_steepness, neuron_value,
261 last_layer_begin->sum) * neuron_diff;
270 Propagate the error backwards from the output layer.
272 After this the train_errors in the hidden layers will be:
273 neuron_value_derived * sum(outgoing_weights * connected_neuron)
275 void fann_backpropagate_MSE(struct fann *ann)
279 struct fann_layer *layer_it;
280 struct fann_neuron *neuron_it, *last_neuron;
281 struct fann_neuron **connections;
283 fann_type *error_begin = ann->train_errors;
284 fann_type *error_prev_layer;
286 const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
287 const struct fann_layer *second_layer = ann->first_layer + 1;
288 struct fann_layer *last_layer = ann->last_layer;
290 /* go through all the layers, from last to first.
291 * And propagate the error backwards */
292 for(layer_it = last_layer - 1; layer_it > second_layer; --layer_it)
294 last_neuron = layer_it->last_neuron;
296 /* for each connection in this layer, propagate the error backwards */
297 if(ann->connection_rate >= 1)
299 if(!ann->shortcut_connections)
301 error_prev_layer = error_begin + ((layer_it - 1)->first_neuron - first_neuron);
305 error_prev_layer = error_begin;
308 for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
311 tmp_error = error_begin[neuron_it - first_neuron];
312 weights = ann->weights + neuron_it->first_con;
313 for(i = neuron_it->last_con - neuron_it->first_con; i--;)
315 /*printf("i = %d\n", i);
316 * printf("error_prev_layer[%d] = %f\n", i, error_prev_layer[i]);
317 * printf("weights[%d] = %f\n", i, weights[i]); */
318 error_prev_layer[i] += tmp_error * weights[i];
324 for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
327 tmp_error = error_begin[neuron_it - first_neuron];
328 weights = ann->weights + neuron_it->first_con;
329 connections = ann->connections + neuron_it->first_con;
330 for(i = neuron_it->last_con - neuron_it->first_con; i--;)
332 error_begin[connections[i] - first_neuron] += tmp_error * weights[i];
337 /* then calculate the actual errors in the previous layer */
338 error_prev_layer = error_begin + ((layer_it - 1)->first_neuron - first_neuron);
339 last_neuron = (layer_it - 1)->last_neuron;
341 for(neuron_it = (layer_it - 1)->first_neuron; neuron_it != last_neuron; neuron_it++)
343 *error_prev_layer *= fann_activation_derived(neuron_it->activation_function,
344 neuron_it->activation_steepness, neuron_it->value, neuron_it->sum);
352 Update weights for incremental training
354 void fann_update_weights(struct fann *ann)
356 struct fann_neuron *neuron_it, *last_neuron, *prev_neurons;
357 fann_type tmp_error, delta_w, *weights;
358 struct fann_layer *layer_it;
360 unsigned int num_connections;
362 /* store some variabels local for fast access */
363 const float learning_rate = ann->learning_rate;
364 const float learning_momentum = ann->learning_momentum;
365 struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
366 struct fann_layer *first_layer = ann->first_layer;
367 const struct fann_layer *last_layer = ann->last_layer;
368 fann_type *error_begin = ann->train_errors;
369 fann_type *deltas_begin, *weights_deltas;
371 /* if no room allocated for the deltas, allocate it now */
372 if(ann->prev_weights_deltas == NULL)
374 ann->prev_weights_deltas =
375 (fann_type *) calloc(ann->total_connections_allocated, sizeof(fann_type));
376 if(ann->prev_weights_deltas == NULL)
378 fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
384 printf("\nupdate weights\n");
386 deltas_begin = ann->prev_weights_deltas;
387 prev_neurons = first_neuron;
388 for(layer_it = (first_layer + 1); layer_it != last_layer; layer_it++)
391 printf("layer[%d]\n", layer_it - first_layer);
393 last_neuron = layer_it->last_neuron;
394 if(ann->connection_rate >= 1)
396 if(!ann->shortcut_connections)
398 prev_neurons = (layer_it - 1)->first_neuron;
400 for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
402 tmp_error = error_begin[neuron_it - first_neuron] * learning_rate;
403 num_connections = neuron_it->last_con - neuron_it->first_con;
404 weights = ann->weights + neuron_it->first_con;
405 weights_deltas = deltas_begin + neuron_it->first_con;
406 for(i = 0; i != num_connections; i++)
408 delta_w = tmp_error * prev_neurons[i].value + learning_momentum * weights_deltas[i];
409 weights[i] += delta_w ;
410 weights_deltas[i] = delta_w;
416 for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
418 tmp_error = error_begin[neuron_it - first_neuron] * learning_rate;
419 num_connections = neuron_it->last_con - neuron_it->first_con;
420 weights = ann->weights + neuron_it->first_con;
421 weights_deltas = deltas_begin + neuron_it->first_con;
422 for(i = 0; i != num_connections; i++)
424 delta_w = tmp_error * prev_neurons[i].value + learning_momentum * weights_deltas[i];
425 weights[i] += delta_w;
426 weights_deltas[i] = delta_w;
434 Update slopes for batch training
435 layer_begin = ann->first_layer+1 and layer_end = ann->last_layer-1
436 will update all slopes.
439 void fann_update_slopes_batch(struct fann *ann, struct fann_layer *layer_begin,
440 struct fann_layer *layer_end)
442 struct fann_neuron *neuron_it, *last_neuron, *prev_neurons, **connections;
444 unsigned int i, num_connections;
446 /* store some variabels local for fast access */
447 struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
448 fann_type *error_begin = ann->train_errors;
449 fann_type *slope_begin, *neuron_slope;
451 /* if no room allocated for the slope variabels, allocate it now */
452 if(ann->train_slopes == NULL)
455 (fann_type *) calloc(ann->total_connections_allocated, sizeof(fann_type));
456 if(ann->train_slopes == NULL)
458 fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
463 if(layer_begin == NULL)
465 layer_begin = ann->first_layer + 1;
468 if(layer_end == NULL)
470 layer_end = ann->last_layer - 1;
473 slope_begin = ann->train_slopes;
476 printf("\nupdate slopes\n");
479 prev_neurons = first_neuron;
481 for(; layer_begin <= layer_end; layer_begin++)
484 printf("layer[%d]\n", layer_begin - ann->first_layer);
486 last_neuron = layer_begin->last_neuron;
487 if(ann->connection_rate >= 1)
489 if(!ann->shortcut_connections)
491 prev_neurons = (layer_begin - 1)->first_neuron;
494 for(neuron_it = layer_begin->first_neuron; neuron_it != last_neuron; neuron_it++)
496 tmp_error = error_begin[neuron_it - first_neuron];
497 neuron_slope = slope_begin + neuron_it->first_con;
498 num_connections = neuron_it->last_con - neuron_it->first_con;
499 for(i = 0; i != num_connections; i++)
501 neuron_slope[i] += tmp_error * prev_neurons[i].value;
507 for(neuron_it = layer_begin->first_neuron; neuron_it != last_neuron; neuron_it++)
509 tmp_error = error_begin[neuron_it - first_neuron];
510 neuron_slope = slope_begin + neuron_it->first_con;
511 num_connections = neuron_it->last_con - neuron_it->first_con;
512 connections = ann->connections + neuron_it->first_con;
513 for(i = 0; i != num_connections; i++)
515 neuron_slope[i] += tmp_error * connections[i]->value;
523 Clears arrays used for training before a new training session.
524 Also creates the arrays that do not exist yet.
526 void fann_clear_train_arrays(struct fann *ann)
529 fann_type delta_zero;
531 /* if no room allocated for the slope variabels, allocate it now
532 * (calloc clears mem) */
533 if(ann->train_slopes == NULL)
536 (fann_type *) calloc(ann->total_connections_allocated, sizeof(fann_type));
537 if(ann->train_slopes == NULL)
539 fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
545 memset(ann->train_slopes, 0, (ann->total_connections) * sizeof(fann_type));
548 /* if no room allocated for the variabels, allocate it now */
549 if(ann->prev_steps == NULL)
551 ann->prev_steps = (fann_type *) calloc(ann->total_connections_allocated, sizeof(fann_type));
552 if(ann->prev_steps == NULL)
554 fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
560 memset(ann->prev_steps, 0, (ann->total_connections) * sizeof(fann_type));
563 /* if no room allocated for the variabels, allocate it now */
564 if(ann->prev_train_slopes == NULL)
566 ann->prev_train_slopes =
567 (fann_type *) malloc(ann->total_connections_allocated * sizeof(fann_type));
568 if(ann->prev_train_slopes == NULL)
570 fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
575 if(ann->training_algorithm == FANN_TRAIN_RPROP)
577 delta_zero = ann->rprop_delta_zero;
578 for(i = 0; i < ann->total_connections; i++)
580 ann->prev_train_slopes[i] = delta_zero;
585 memset(ann->prev_train_slopes, 0, (ann->total_connections) * sizeof(fann_type));
590 Update weights for batch training
592 void fann_update_weights_batch(struct fann *ann, unsigned int num_data, unsigned int first_weight,
593 unsigned int past_end)
595 fann_type *train_slopes = ann->train_slopes;
596 fann_type *weights = ann->weights;
597 const float epsilon = ann->learning_rate / num_data;
598 unsigned int i = first_weight;
600 for(; i != past_end; i++)
602 weights[i] += train_slopes[i] * epsilon;
603 train_slopes[i] = 0.0;
/* INTERNAL FUNCTION
   The quickprop training algorithm (Fahlman): combines a linear
   gradient term with a quadratic step estimated from the current and
   previous slope of each weight.
   NOTE(review): this block was recovered from a fragmented source;
   several structural lines (braces, the `w = weights[i];` load, the
   final weight update) are not visible here — confirm against the
   upstream file before relying on the comments below.
 */
void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data,
unsigned int first_weight, unsigned int past_end)
/* local aliases of the per-weight training arrays for fast access */
fann_type *train_slopes = ann->train_slopes;
fann_type *weights = ann->weights;
fann_type *prev_steps = ann->prev_steps;
fann_type *prev_train_slopes = ann->prev_train_slopes;
fann_type w, prev_step, slope, prev_slope, next_step;
/* gradient term scaled by learning rate averaged over the batch */
float epsilon = ann->learning_rate / num_data;
float decay = ann->quickprop_decay; /*-0.0001;*/
float mu = ann->quickprop_mu; /*1.75; */
/* shrink_factor guards the quadratic estimate against blow-up when
 * the current slope approaches the previous slope */
float shrink_factor = (float) (mu / (1.0 + mu));
unsigned int i = first_weight;
for(; i != past_end; i++)
prev_step = prev_steps[i];
/* weight decay folded into the slope; NOTE(review): `w` is read here
 * but its assignment (presumably w = weights[i]) is not visible in
 * this fragment — confirm */
slope = train_slopes[i] + decay * w;
prev_slope = prev_train_slopes[i];
/* clamp runaway previous steps before using them below */
if(prev_step > 999 || prev_step < -999)
prev_step = prev_steps[i];
/* The step must always be in direction opposite to the slope. */
if(prev_step > 0.001)
/* If last step was positive... */
/* Add in linear term if current slope is still positive. */
next_step += epsilon * slope;
/*If current slope is close to or larger than prev slope... */
if(slope > (shrink_factor * prev_slope))
next_step += mu * prev_step; /* Take maximum size negative step. */
next_step += prev_step * slope / (prev_slope - slope); /* Else, use quadratic estimate. */
else if(prev_step < -0.001)
/* If last step was negative... */
/* Add in linear term if current slope is still negative. */
next_step += epsilon * slope;
/* If current slope is close to or more neg than prev slope... */
if(slope < (shrink_factor * prev_slope))
next_step += mu * prev_step; /* Take maximum size negative step. */
next_step += prev_step * slope / (prev_slope - slope); /* Else, use quadratic estimate. */
/* Last step was zero, so use only linear term. */
next_step += epsilon * slope;
/* debug trace for abnormally large steps */
if(next_step > 1000 || next_step < -1000)
printf("quickprop[%d] weight=%f, slope=%f, prev_slope=%f, next_step=%f, prev_step=%f\n",
i, weights[i], slope, prev_slope, next_step, prev_step);
/* update global data arrays */
prev_steps[i] = next_step;
prev_train_slopes[i] = slope;
train_slopes[i] = 0.0;
/* INTERNAL FUNCTION
   The iRprop- algorithm: sign-based weight updates with a per-weight
   adaptive step size (Igel & Huesken variant of RPROP).
   NOTE(review): recovered from a fragmented source; braces and at
   least one branch (the same_sign == 0 case, and the `if(slope < 0)`
   test selecting the step direction) are not visible here — confirm
   against the upstream file.
 */
void fann_update_weights_irpropm(struct fann *ann, unsigned int first_weight, unsigned int past_end)
/* local aliases of the per-weight training arrays for fast access */
fann_type *train_slopes = ann->train_slopes;
fann_type *weights = ann->weights;
fann_type *prev_steps = ann->prev_steps;
fann_type *prev_train_slopes = ann->prev_train_slopes;
fann_type prev_step, slope, prev_slope, next_step, same_sign;
/* These should be set from variables */
float increase_factor = ann->rprop_increase_factor; /*1.2; */
float decrease_factor = ann->rprop_decrease_factor; /*0.5; */
float delta_min = ann->rprop_delta_min; /*0.0; */
float delta_max = ann->rprop_delta_max; /*50.0; */
unsigned int i = first_weight;
for(; i != past_end; i++)
prev_step = fann_max(prev_steps[i], (fann_type) 0.001); /* prev_step may not be zero because then the training will stop */
slope = train_slopes[i];
prev_slope = prev_train_slopes[i];
/* positive product: gradient kept its sign; negative: it flipped */
same_sign = prev_slope * slope;
/* sign unchanged: grow the step, capped at delta_max */
next_step = fann_min(prev_step * increase_factor, delta_max);
else if(same_sign < 0.0)
/* sign flipped (minimum overshot): shrink the step, floored at delta_min */
next_step = fann_max(prev_step * decrease_factor, delta_min);
/* move the weight against the gradient by the adapted step size */
weights[i] -= next_step;
weights[i] += next_step;
/*
 * printf("weight=%f, slope=%f, next_step=%f, prev_step=%f\n", weights[i], slope, next_step, prev_step);
 */
/* update global data arrays */
prev_steps[i] = next_step;
prev_train_slopes[i] = slope;
train_slopes[i] = 0.0;
/* Generated getter/setter pairs (fann_get_* / fann_set_*) for simple
 * struct fann fields, expanded by the FANN_GET_SET macro. */
FANN_GET_SET(enum fann_train_enum, training_algorithm)
FANN_GET_SET(float, learning_rate)
782 FANN_EXTERNAL void FANN_API fann_set_activation_function_hidden(struct fann *ann,
783 enum fann_activationfunc_enum activation_function)
785 struct fann_neuron *last_neuron, *neuron_it;
786 struct fann_layer *layer_it;
787 struct fann_layer *last_layer = ann->last_layer - 1; /* -1 to not update the output layer */
789 for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
791 last_neuron = layer_it->last_neuron;
792 for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
794 neuron_it->activation_function = activation_function;
799 FANN_EXTERNAL struct fann_layer* FANN_API fann_get_layer(struct fann *ann, int layer)
801 if(layer <= 0 || layer >= (ann->last_layer - ann->first_layer))
803 fann_error((struct fann_error *) ann, FANN_E_INDEX_OUT_OF_BOUND, layer);
807 return ann->first_layer + layer;
810 FANN_EXTERNAL struct fann_neuron* FANN_API fann_get_neuron_layer(struct fann *ann, struct fann_layer* layer, int neuron)
812 if(neuron >= (layer->first_neuron - layer->last_neuron))
814 fann_error((struct fann_error *) ann, FANN_E_INDEX_OUT_OF_BOUND, neuron);
818 return layer->first_neuron + neuron;
821 FANN_EXTERNAL struct fann_neuron* FANN_API fann_get_neuron(struct fann *ann, unsigned int layer, int neuron)
823 struct fann_layer *layer_it = fann_get_layer(ann, layer);
826 return fann_get_neuron_layer(ann, layer_it, neuron);
829 FANN_EXTERNAL void FANN_API fann_set_activation_function(struct fann *ann,
830 enum fann_activationfunc_enum
835 struct fann_neuron* neuron_it = fann_get_neuron(ann, layer, neuron);
836 if(neuron_it == NULL)
839 neuron_it->activation_function = activation_function;
842 FANN_EXTERNAL void FANN_API fann_set_activation_function_layer(struct fann *ann,
843 enum fann_activationfunc_enum
847 struct fann_neuron *last_neuron, *neuron_it;
848 struct fann_layer *layer_it = fann_get_layer(ann, layer);
853 last_neuron = layer_it->last_neuron;
854 for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
856 neuron_it->activation_function = activation_function;
861 FANN_EXTERNAL void FANN_API fann_set_activation_function_output(struct fann *ann,
862 enum fann_activationfunc_enum activation_function)
864 struct fann_neuron *last_neuron, *neuron_it;
865 struct fann_layer *last_layer = ann->last_layer - 1;
867 last_neuron = last_layer->last_neuron;
868 for(neuron_it = last_layer->first_neuron; neuron_it != last_neuron; neuron_it++)
870 neuron_it->activation_function = activation_function;
874 FANN_EXTERNAL void FANN_API fann_set_activation_steepness_hidden(struct fann *ann,
877 struct fann_neuron *last_neuron, *neuron_it;
878 struct fann_layer *layer_it;
879 struct fann_layer *last_layer = ann->last_layer - 1; /* -1 to not update the output layer */
881 for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
883 last_neuron = layer_it->last_neuron;
884 for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
886 neuron_it->activation_steepness = steepness;
891 FANN_EXTERNAL void FANN_API fann_set_activation_steepness(struct fann *ann,
896 struct fann_neuron* neuron_it = fann_get_neuron(ann, layer, neuron);
897 if(neuron_it == NULL)
900 neuron_it->activation_steepness = steepness;
903 FANN_EXTERNAL void FANN_API fann_set_activation_steepness_layer(struct fann *ann,
907 struct fann_neuron *last_neuron, *neuron_it;
908 struct fann_layer *layer_it = fann_get_layer(ann, layer);
913 last_neuron = layer_it->last_neuron;
914 for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
916 neuron_it->activation_steepness = steepness;
920 FANN_EXTERNAL void FANN_API fann_set_activation_steepness_output(struct fann *ann,
923 struct fann_neuron *last_neuron, *neuron_it;
924 struct fann_layer *last_layer = ann->last_layer - 1;
926 last_neuron = last_layer->last_neuron;
927 for(neuron_it = last_layer->first_neuron; neuron_it != last_neuron; neuron_it++)
929 neuron_it->activation_steepness = steepness;
/* Generated getter/setter pairs (fann_get_* / fann_set_*) for the
 * remaining simple training parameters, expanded by FANN_GET_SET. */
FANN_GET_SET(enum fann_errorfunc_enum, train_error_function)
FANN_GET_SET(fann_callback_type, callback)
FANN_GET_SET(float, quickprop_decay)
FANN_GET_SET(float, quickprop_mu)
FANN_GET_SET(float, rprop_increase_factor)
FANN_GET_SET(float, rprop_decrease_factor)
FANN_GET_SET(float, rprop_delta_min)
FANN_GET_SET(float, rprop_delta_max)
FANN_GET_SET(enum fann_stopfunc_enum, train_stop_function)
FANN_GET_SET(fann_type, bit_fail_limit)
FANN_GET_SET(float, learning_momentum)