fann_sys/lib.rs
1//! Raw bindings to C functions of the Fast Artificial Neural Network library
2//!
3//!
4//! # Creation/Execution
5//!
6//! The FANN library is designed to be very easy to use.
7//! A feedforward ANN can be created by a simple `fann_create_standard` function, while
8//! other ANNs can be created just as easily. The ANNs can be trained by `fann_train_on_file`
9//! and executed by `fann_run`.
10//!
//! All of this can be done without much knowledge of the internals of ANNs, although the ANNs
//! created will still be powerful and effective. If you have more knowledge about ANNs and desire
//! more control, almost every part of the ANNs can be parameterized to create specialized and
//! highly optimized ANNs.
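//!
//! A minimal sketch of that workflow through these raw bindings (not compiled here; it assumes
//! the `fann_create_standard`, `fann_run` and `fann_destroy` bindings declared elsewhere in this
//! crate, and an existing `xor.data` training file):
//!
//! ```notest
//! use fann_sys::*;
//! use std::ffi::CString;
//!
//! unsafe {
//!     // Three layers: 2 inputs, 3 hidden neurons, 1 output.
//!     let ann = fann_create_standard(3, 2u32, 3u32, 1u32);
//!     let path = CString::new("xor.data").unwrap();
//!     // Train for at most 1000 epochs, report every 100 epochs, stop at MSE 0.001.
//!     fann_train_on_file(ann, path.as_ptr(), 1000, 100, 0.001);
//!     let mut input: [fann_type; 2] = [0.0, 1.0];
//!     let output = fann_run(ann, input.as_mut_ptr());
//!     println!("network output: {}", *output);
//!     fann_destroy(ann);
//! }
//! ```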
15//!
16//!
17//! # Training
18//!
19//! There are many different ways of training neural networks and the FANN library supports
20//! a number of different approaches.
21//!
//! Two fundamentally different approaches are the most commonly used:
23//!
//! * Fixed topology training - The size and topology of the ANN are determined in advance
//! and the training alters the weights in order to minimize the difference between
//! the desired output values and the actual output values. This kind of training is
//! supported by `fann_train_on_data`.
//!
//! * Evolving topology training - The training starts out with an empty ANN, consisting only
//! of input and output neurons. Hidden neurons and connections are added during training,
//! in order to achieve the same goal as for fixed topology training. This kind of training
//! is supported by FANN Cascade Training.
33//!
34//!
35//! # Cascade Training
36//!
37//! Cascade training differs from ordinary training in the sense that it starts with an empty neural
38//! network and then adds neurons one by one, while it trains the neural network. The main benefit
39//! of this approach is that you do not have to guess the number of hidden layers and neurons prior
40//! to training, but cascade training has also proved better at solving some problems.
41//!
//! The basic idea of cascade training is that a number of candidate neurons are trained separately
//! from the real network, and then the most promising of these candidate neurons is inserted into the
44//! neural network. Then the output connections are trained and new candidate neurons are prepared.
45//! The candidate neurons are created as shortcut connected neurons in a new hidden layer, which
46//! means that the final neural network will consist of a number of hidden layers with one shortcut
47//! connected neuron in each.
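//!
//! A sketch of that workflow (assuming the `fann_create_shortcut`, `fann_cascadetrain_on_data`
//! and `fann_destroy` bindings declared elsewhere in this crate):
//!
//! ```notest
//! use fann_sys::*;
//! use std::ffi::CString;
//!
//! unsafe {
//!     let path = CString::new("train.data").unwrap();
//!     let data = fann_read_train_from_file(path.as_ptr());
//!     // Start without hidden neurons; cascade training adds them one by one.
//!     let ann = fann_create_shortcut(2,
//!                                    fann_num_input_train_data(data),
//!                                    fann_num_output_train_data(data));
//!     // Add at most 30 neurons, report every 5 neurons, stop at MSE 0.001.
//!     fann_cascadetrain_on_data(ann, data, 30, 5, 0.001);
//!     fann_destroy(ann);
//!     fann_destroy_train(data);
//! }
//! ```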
48//!
49//!
50//! # File Input/Output
51//!
//! It is possible to save an entire ANN to a file with `fann_save` for future loading with
//! `fann_create_from_file`.
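//!
//! A sketch (assuming a trained network `ann` and the `fann_save` and `fann_create_from_file`
//! bindings declared elsewhere in this crate):
//!
//! ```notest
//! use fann_sys::*;
//! use std::ffi::CString;
//!
//! let path = CString::new("network.net").unwrap();
//! unsafe {
//!     // Persist the trained network ...
//!     fann_save(ann, path.as_ptr());
//!     // ... and restore it later, possibly in another process.
//!     let restored = fann_create_from_file(path.as_ptr());
//! }
//! ```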
54//!
55//!
56//! # Error Handling
57//!
58//! Errors from the FANN library are usually reported on `stderr`.
59//! It is however possible to redirect these error messages to a file,
60//! or completely ignore them with the `fann_set_error_log` function.
61//!
62//! It is also possible to inspect the last error message by using the
63//! `fann_get_errno` and `fann_get_errstr` functions.
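//!
//! A sketch using the error-handling bindings declared below (given a valid `ann: *mut fann`,
//! which may be cast to a `fann_error` pointer):
//!
//! ```notest
//! use fann_sys::*;
//! use std::ptr;
//!
//! unsafe {
//!     // Silence FANN's error output for this network.
//!     fann_set_error_log(ann as *mut fann_error, ptr::null_mut());
//!     // ... call other FANN functions, then inspect the last error, if any.
//!     match fann_get_errno(ann as *const fann_error) {
//!         FANN_E_NO_ERROR => {}
//!         _ => fann_print_error(ann as *mut fann_error),
//!     }
//! }
//! ```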
64//!
65//!
66//! # Datatypes
67//!
68//! The two main datatypes used in the FANN library are `fann`,
69//! which represents an artificial neural network, and `fann_train_data`,
70//! which represents training data.
71#![allow(non_camel_case_types)]
72
73// TODO: Cross-link the documentation.
74
75extern crate libc;
76
77pub use fann_activationfunc_enum::*;
78pub use fann_errno_enum::*;
79pub use fann_errorfunc_enum::*;
80pub use fann_nettype_enum::*;
81pub use fann_stopfunc_enum::*;
82pub use fann_train_enum::*;
83use libc::{c_char, c_float, c_int, c_uint, c_void};
84
85use libc::FILE;
86
87#[cfg(feature = "double")]
88type fann_type_internal = libc::c_double;
89#[cfg(not(feature = "double"))]
90type fann_type_internal = c_float;
91
/// The type of weights, inputs and outputs in a neural network. In the Rust bindings, it is
/// defined as `c_float` by default, and as `c_double` if the `double` feature is enabled.
94///
95/// In the FANN C library, `fann_type` is defined as:
96///
/// * `float` - if you include `fann.h` or `floatfann.h`
/// * `double` - if you include `doublefann.h`
/// * `int` - if you include `fixedfann.h` (only for executing a network, not training).
100pub type fann_type = fann_type_internal;
101
102/// Error events on `fann` and `fann_train_data`.
103#[repr(C)]
104#[derive(Copy, Clone)]
105pub enum fann_errno_enum {
106 /// No error
107 FANN_E_NO_ERROR = 0,
108 /// Unable to open configuration file for reading
109 FANN_E_CANT_OPEN_CONFIG_R,
110 /// Unable to open configuration file for writing
111 FANN_E_CANT_OPEN_CONFIG_W,
112 /// Wrong version of configuration file
113 FANN_E_WRONG_CONFIG_VERSION,
114 /// Error reading info from configuration file
115 FANN_E_CANT_READ_CONFIG,
116 /// Error reading neuron info from configuration file
117 FANN_E_CANT_READ_NEURON,
118 /// Error reading connections from configuration file
119 FANN_E_CANT_READ_CONNECTIONS,
120 /// Number of connections not equal to the number expected
121 FANN_E_WRONG_NUM_CONNECTIONS,
122 /// Unable to open train data file for writing
123 FANN_E_CANT_OPEN_TD_W,
124 /// Unable to open train data file for reading
125 FANN_E_CANT_OPEN_TD_R,
126 /// Error reading training data from file
127 FANN_E_CANT_READ_TD,
128 /// Unable to allocate memory
129 FANN_E_CANT_ALLOCATE_MEM,
130 /// Unable to train with the selected activation function
131 FANN_E_CANT_TRAIN_ACTIVATION,
132 /// Unable to use the selected activation function
133 FANN_E_CANT_USE_ACTIVATION,
134 /// Irreconcilable differences between two `fann_train_data` structures
135 FANN_E_TRAIN_DATA_MISMATCH,
136 /// Unable to use the selected training algorithm
137 FANN_E_CANT_USE_TRAIN_ALG,
138 /// Trying to take subset which is not within the training set
139 FANN_E_TRAIN_DATA_SUBSET,
140 /// Index is out of bound
141 FANN_E_INDEX_OUT_OF_BOUND,
142 /// Scaling parameters not present
143 FANN_E_SCALE_NOT_PRESENT,
144}
145
/// The training algorithms used when training on `fann_train_data` with functions like
/// `fann_train_on_data` or `fann_train_on_file`. Incremental training alters the weights
/// each time it is presented an input pattern, while batch training only alters the weights once
/// after it has been presented all the patterns.
150#[repr(C)]
151#[derive(Copy, Clone)]
152pub enum fann_train_enum {
153 /// Standard backpropagation algorithm, where the weights are
154 /// updated after each training pattern. This means that the weights are updated many
155 /// times during a single epoch. For this reason some problems will train very fast with
156 /// this algorithm, while other more advanced problems will not train very well.
157 FANN_TRAIN_INCREMENTAL = 0,
158 /// Standard backpropagation algorithm, where the weights are updated after calculating the mean
159 /// square error for the whole training set. This means that the weights are only updated once
160 /// during an epoch. For this reason some problems will train slower with this algorithm. But
161 /// since the mean square error is calculated more correctly than in incremental training, some
162 /// problems will reach better solutions with this algorithm.
163 FANN_TRAIN_BATCH,
164 /// A more advanced batch training algorithm which achieves good results
    /// for many problems. The RPROP training algorithm is adaptive and therefore does not
    /// use the `learning_rate`. Some other parameters can, however, be set to change the way the
    /// RPROP algorithm works, but changing them is only recommended for users with insight into
    /// how the RPROP training algorithm works. The RPROP training algorithm is described by
    /// [Riedmiller and Braun, 1993], but the actual learning algorithm used here is the
    /// iRPROP- training algorithm described by [Igel and Husken, 2000], which
    /// is a variant of the standard RPROP training algorithm.
172 FANN_TRAIN_RPROP,
173 /// A more advanced batch training algorithm which achieves good results
174 /// for many problems. The quickprop training algorithm uses the `learning_rate` parameter
    /// along with other more advanced parameters, but changing these advanced parameters is only
    /// recommended for users with insight into how the quickprop training algorithm works.
177 /// The quickprop training algorithm is described by [Fahlman, 1988].
178 FANN_TRAIN_QUICKPROP,
179}
180
181/// The activation functions used for the neurons during training. The activation functions
182/// can either be defined for a group of neurons by `fann_set_activation_function_hidden` and
183/// `fann_set_activation_function_output`, or it can be defined for a single neuron by
184/// `fann_set_activation_function`.
185///
186/// The steepness of an activation function is defined in the same way by
187/// `fann_set_activation_steepness_hidden`, `fann_set_activation_steepness_output` and
188/// `fann_set_activation_steepness`.
189///
190/// The functions are described with functions where:
191///
192/// * x is the input to the activation function,
193///
194/// * y is the output,
195///
196/// * s is the steepness and
197///
/// * d is the derivative.
199#[repr(C)]
200#[derive(Copy, Clone)]
201pub enum fann_activationfunc_enum {
202 /// Neuron does not exist or does not have an activation function.
203 FANN_NONE = -1,
204 /// Linear activation function.
205 ///
206 /// * span: -inf < y < inf
207 ///
208 /// * y = x*s, d = 1*s
209 ///
210 /// * Can NOT be used in fixed point.
211 FANN_LINEAR = 0,
212 /// Threshold activation function.
213 ///
214 /// * x < 0 -> y = 0, x >= 0 -> y = 1
215 ///
216 /// * Can NOT be used during training.
217 FANN_THRESHOLD,
    /// Symmetric threshold activation function.
    ///
    /// * x < 0 -> y = -1, x >= 0 -> y = 1
    ///
    /// * Can NOT be used during training.
223 FANN_THRESHOLD_SYMMETRIC,
224 /// Sigmoid activation function.
225 ///
226 /// * One of the most used activation functions.
227 ///
228 /// * span: 0 < y < 1
229 ///
230 /// * y = 1/(1 + exp(-2*s*x))
231 ///
232 /// * d = 2*s*y*(1 - y)
233 FANN_SIGMOID,
234 /// Stepwise linear approximation to sigmoid.
235 ///
236 /// * Faster than sigmoid but a bit less precise.
237 FANN_SIGMOID_STEPWISE,
238 /// Symmetric sigmoid activation function, aka. tanh.
239 ///
240 /// * One of the most used activation functions.
241 ///
242 /// * span: -1 < y < 1
243 ///
244 /// * y = tanh(s*x) = 2/(1 + exp(-2*s*x)) - 1
245 ///
246 /// * d = s*(1-(y*y))
247 FANN_SIGMOID_SYMMETRIC,
248 /// Stepwise linear approximation to symmetric sigmoid.
249 ///
250 /// * Faster than symmetric sigmoid but a bit less precise.
251 FANN_SIGMOID_SYMMETRIC_STEPWISE,
252 /// Gaussian activation function.
253 ///
254 /// * 0 when x = -inf, 1 when x = 0 and 0 when x = inf
255 ///
256 /// * span: 0 < y < 1
257 ///
258 /// * y = exp(-x*s*x*s)
259 ///
260 /// * d = -2*x*s*y*s
261 FANN_GAUSSIAN,
262 /// Symmetric gaussian activation function.
263 ///
    /// * -1 when x = -inf, 1 when x = 0 and -1 when x = inf
265 ///
266 /// * span: -1 < y < 1
267 ///
268 /// * y = exp(-x*s*x*s)*2-1
269 ///
270 /// * d = -2*x*s*(y+1)*s
271 FANN_GAUSSIAN_SYMMETRIC,
272 /// Stepwise linear approximation to gaussian.
273 /// Faster than gaussian but a bit less precise.
274 /// NOT implemented yet.
275 FANN_GAUSSIAN_STEPWISE,
276 /// Fast (sigmoid like) activation function defined by David Elliott
277 ///
278 /// * span: 0 < y < 1
279 ///
280 /// * y = ((x*s) / 2) / (1 + |x*s|) + 0.5
281 ///
282 /// * d = s*1/(2*(1+|x*s|)*(1+|x*s|))
283 FANN_ELLIOTT,
284 /// Fast (symmetric sigmoid like) activation function defined by David Elliott
285 ///
286 /// * span: -1 < y < 1
287 ///
288 /// * y = (x*s) / (1 + |x*s|)
289 ///
290 /// * d = s*1/((1+|x*s|)*(1+|x*s|))
291 FANN_ELLIOTT_SYMMETRIC,
292 /// Bounded linear activation function.
293 ///
294 /// * span: 0 <= y <= 1
295 ///
296 /// * y = x*s, d = 1*s
297 FANN_LINEAR_PIECE,
298 /// Bounded linear activation function.
299 ///
300 /// * span: -1 <= y <= 1
301 ///
302 /// * y = x*s, d = 1*s
303 FANN_LINEAR_PIECE_SYMMETRIC,
304 /// Periodical sine activation function.
305 ///
306 /// * span: -1 <= y <= 1
307 ///
308 /// * y = sin(x*s)
309 ///
310 /// * d = s*cos(x*s)
311 FANN_SIN_SYMMETRIC,
312 /// Periodical cosine activation function.
313 ///
314 /// * span: -1 <= y <= 1
315 ///
316 /// * y = cos(x*s)
317 ///
318 /// * d = s*-sin(x*s)
319 FANN_COS_SYMMETRIC,
320 /// Periodical sine activation function.
321 ///
322 /// * span: 0 <= y <= 1
323 ///
324 /// * y = sin(x*s)/2+0.5
325 ///
326 /// * d = s*cos(x*s)/2
327 FANN_SIN,
328 /// Periodical cosine activation function.
329 ///
330 /// * span: 0 <= y <= 1
331 ///
332 /// * y = cos(x*s)/2+0.5
333 ///
334 /// * d = s*-sin(x*s)/2
335 FANN_COS,
336}
337
338/// Error function used during training.
339#[repr(C)]
340#[derive(Copy, Clone)]
341pub enum fann_errorfunc_enum {
342 /// Standard linear error function.
343 FANN_ERRORFUNC_LINEAR = 0,
    /// Tanh error function, usually better but can require a lower learning rate. This error
    /// function aggressively targets outputs that differ much from the desired, while not putting
    /// much weight on outputs that only differ a little. This error function is not recommended
    /// for cascade training and incremental training.
348 FANN_ERRORFUNC_TANH,
349}
350
351/// Stop criteria used during training.
352#[repr(C)]
353#[derive(Copy, Clone)]
354pub enum fann_stopfunc_enum {
355 /// Stop criterion is Mean Square Error (MSE) value.
356 FANN_STOPFUNC_MSE = 0,
357 /// Stop criterion is number of bits that fail. The number of bits means the
358 /// number of output neurons which differ more than the bit fail limit
359 /// (see `fann_get_bit_fail_limit`, `fann_set_bit_fail_limit`).
360 /// The bits are counted in all of the training data, so this number can be higher than
361 /// the number of training data.
362 FANN_STOPFUNC_BIT,
363}
364
365/// Definition of network types used by `fann_get_network_type`.
366#[repr(C)]
367#[derive(Copy, Clone)]
368pub enum fann_nettype_enum {
369 /// Each layer only has connections to the next layer.
370 FANN_NETTYPE_LAYER = 0,
371 /// Each layer has connections to all following layers.
372 FANN_NETTYPE_SHORTCUT,
373}
374
375/// This callback function can be called during training when using `fann_train_on_data`,
376/// `fann_train_on_file` or `fann_cascadetrain_on_data`.
377///
378/// The callback can be set by using `fann_set_callback` and is very useful for doing custom
379/// things during training. It is recommended to use this function when implementing custom
380/// training procedures, or when visualizing the training in a GUI etc. The parameters which the
381/// callback function takes are the parameters given to `fann_train_on_data`, plus an `epochs`
382/// parameter which tells how many epochs the training has taken so far.
383///
/// The callback function should return an integer; if the callback function returns -1, the
/// training will terminate.
386///
387/// Example of a callback function:
388///
389/// ```
390/// extern crate libc;
391/// extern crate fann_sys;
392///
393/// use libc::*;
394/// use fann_sys::*;
395///
396/// extern "C" fn cb(ann: *mut fann,
397/// train: *mut fann_train_data,
398/// max_epochs: c_uint,
399/// epochs_between_reports: c_uint,
400/// desired_error: c_float,
401/// epochs: c_uint) -> c_int {
/// let mse = unsafe { fann_get_MSE(ann) };
403/// println!("Epochs: {}. MSE: {}. Desired MSE: {}", epochs, mse, desired_error);
404/// 0
405/// }
406///
407/// fn main() {
408/// let test_callback: fann_callback_type = Some(cb);
409/// }
410/// ```
411pub type fann_callback_type = Option<
412 extern "C" fn(
413 ann: *mut fann,
414 train: *mut fann_train_data,
415 max_epochs: c_uint,
416 epochs_between_reports: c_uint,
417 desired_error: c_float,
418 epochs: c_uint,
419 ) -> c_int,
420>;
421
422#[repr(C)]
423struct fann_neuron {
424 first_con: c_uint,
425 last_con: c_uint,
426 sum: fann_type,
427 value: fann_type,
428 activation_steepness: fann_type,
429 activation_function: fann_activationfunc_enum,
430}
431
432#[repr(C)]
433struct fann_layer {
434 first_neuron: *mut fann_neuron,
435 last_neuron: *mut fann_neuron,
436}
437
438/// Structure used to store error-related information, both
439/// `fann` and `fann_train_data` can be cast to this type.
440///
441/// # See also
442/// `fann_set_error_log`, `fann_get_errno`
443#[repr(C)]
444pub struct fann_error {
445 errno_f: fann_errno_enum,
446 error_log: *mut FILE,
447 errstr: *mut c_char,
448}
449
450/// The fast artificial neural network (`fann`) structure.
451///
452/// Data within this structure should never be accessed directly, but only by using the
453/// `fann_get_...` and `fann_set_...` functions.
454///
455/// The fann structure is created using one of the `fann_create_...` functions and each of
456/// the functions which operates on the structure takes a `fann` pointer as the first parameter.
457///
458/// # See also
459/// `fann_create_standard`, `fann_destroy`
460#[repr(C)]
461pub struct fann {
462 errno_f: fann_errno_enum,
463 error_log: *mut FILE,
464 errstr: *mut c_char,
465 learning_rate: c_float,
466 learning_momentum: c_float,
467 connection_rate: c_float,
468 network_type: fann_nettype_enum,
469 first_layer: *mut fann_layer,
470 last_layer: *mut fann_layer,
471 total_neurons: c_uint,
472 num_input: c_uint,
473 num_output: c_uint,
474 weights: *mut fann_type,
475 connections: *mut *mut fann_neuron,
476 train_errors: *mut fann_type,
477 training_algorithm: fann_train_enum,
478 total_connections: c_uint,
479 output: *mut fann_type,
480 num_mse: c_uint,
481 mse_value: c_float,
482 num_bit_fail: c_uint,
483 bit_fail_limit: fann_type,
484 train_error_function: fann_errorfunc_enum,
485 train_stop_function: fann_stopfunc_enum,
486 callback: fann_callback_type,
487 user_data: *mut c_void,
488 cascade_output_change_fraction: c_float,
489 cascade_output_stagnation_epochs: c_uint,
490 cascade_candidate_change_fraction: c_float,
491 cascade_candidate_stagnation_epochs: c_uint,
492 cascade_best_candidate: c_uint,
493 cascade_candidate_limit: fann_type,
494 cascade_weight_multiplier: fann_type,
495 cascade_max_out_epochs: c_uint,
496 cascade_max_cand_epochs: c_uint,
497 cascade_activation_functions: *mut fann_activationfunc_enum,
498 cascade_activation_functions_count: c_uint,
499 cascade_activation_steepnesses: *mut fann_type,
500 cascade_activation_steepnesses_count: c_uint,
501 cascade_num_candidate_groups: c_uint,
502 cascade_candidate_scores: *mut fann_type,
503 total_neurons_allocated: c_uint,
504 total_connections_allocated: c_uint,
505 quickprop_decay: c_float,
506 quickprop_mu: c_float,
507 rprop_increase_factor: c_float,
508 rprop_decrease_factor: c_float,
509 rprop_delta_min: c_float,
510 rprop_delta_max: c_float,
511 rprop_delta_zero: c_float,
512 train_slopes: *mut fann_type,
513 prev_steps: *mut fann_type,
514 prev_train_slopes: *mut fann_type,
515 prev_weights_deltas: *mut fann_type,
516 scale_mean_in: *mut c_float,
517 scale_deviation_in: *mut c_float,
518 scale_new_min_in: *mut c_float,
519 scale_factor_in: *mut c_float,
520 scale_mean_out: *mut c_float,
521 scale_deviation_out: *mut c_float,
522 scale_new_min_out: *mut c_float,
523 scale_factor_out: *mut c_float,
524}
525
526/// Describes a connection between two neurons and its weight.
527///
528/// # See Also
529/// `fann_get_connection_array`, `fann_set_weight_array`
530///
531/// This structure appears in FANN >= 2.1.0.
532#[repr(C)]
533#[derive(Copy, Clone, Debug, PartialEq)]
534pub struct fann_connection {
535 /// Unique number used to identify source neuron
536 pub from_neuron: c_uint,
537 /// Unique number used to identify destination neuron
538 pub to_neuron: c_uint,
539 /// The numerical value of the weight
540 pub weight: fann_type,
541}
542
543/// Structure used to store data, for use with training.
544///
545/// The data inside this structure should never be manipulated directly, but should use some
546/// of the supplied training data manipulation functions.
547///
548/// The training data structure is very useful for storing data during training and testing of a
549/// neural network.
550///
551/// # See also
552/// `fann_read_train_from_file`, `fann_train_on_data`, `fann_destroy_train`
553#[repr(C)]
554pub struct fann_train_data {
555 errno_f: fann_errno_enum,
556 error_log: *mut FILE,
557 errstr: *mut c_char,
558 num_data: c_uint,
559 num_input: c_uint,
560 num_output: c_uint,
561 input: *mut *mut fann_type,
562 output: *mut *mut fann_type,
563}
564
565#[cfg_attr(not(feature = "double"), link(name = "fann"))]
566#[cfg_attr(feature = "double", link(name = "doublefann"))]
567extern "C" {
568 pub static mut fann_default_error_log: *mut FILE;
569
    /// Change where errors are logged to. Both `fann` and `fann_train_data` can be
    /// cast to `fann_error`, so this function can be used to set either of these.
572 ///
573 /// If `log_file` is NULL, no errors will be printed.
574 ///
    /// If `errdat` is NULL, the default log will be set. The default log is the log used when
    /// creating `fann` and `fann_train_data` structs. This default log will also be the default
    /// for all new structs that are created.
578 ///
579 /// The default behavior is to log them to `stderr`.
580 ///
581 /// # See also
582 /// `fann_error`
583 ///
584 /// This function appears in FANN >= 1.1.0.
585 pub fn fann_set_error_log(errdat: *mut fann_error, log_file: *mut FILE);
586
587 /// Returns the last error number.
588 ///
589 /// # See also
590 /// `fann_errno_enum`, `fann_reset_errno`
591 ///
592 /// This function appears in FANN >= 1.1.0.
593 pub fn fann_get_errno(errdat: *const fann_error) -> fann_errno_enum;
594
595 /// Resets the last error number.
596 ///
597 /// This function appears in FANN >= 1.1.0.
598 pub fn fann_reset_errno(errdat: *mut fann_error);
599
600 /// Resets the last error string.
601 ///
602 /// This function appears in FANN >= 1.1.0.
603 pub fn fann_reset_errstr(errdat: *mut fann_error);
604
605 /// Returns the last error string.
606 ///
607 /// This function calls `fann_reset_errno` and `fann_reset_errstr`.
608 ///
609 /// This function appears in FANN >= 1.1.0.
610 pub fn fann_get_errstr(errdat: *mut fann_error) -> *mut c_char;
611
612 /// Prints the last error to `stderr`.
613 ///
614 /// This function appears in FANN >= 1.1.0.
615 pub fn fann_print_error(errdat: *mut fann_error);
616
617 /// Creates a copy of a fann structure.
618 ///
    /// Data pointed to by the user data (see `fann_set_user_data`) is not copied, but the user
    /// data pointer is copied.
    ///
    /// This function appears in FANN >= 2.2.0.
621 pub fn fann_copy(orig: *const fann) -> *mut fann;
622
623 /// Train one iteration with a set of inputs, and a set of desired outputs.
624 /// This training is always incremental training (see `fann_train_enum`), since
625 /// only one pattern is presented.
626 ///
627 /// # Parameters
628 ///
629 /// * `ann` - The neural network structure
630 /// * `input` - an array of inputs. This array must be exactly `fann_get_num_input`
631 /// long.
632 /// * `desired_output` - an array of desired outputs. This array must be exactly
633 /// `fann_get_num_output` long.
634 ///
635 /// # See also
636 /// `fann_train_on_data`, `fann_train_epoch`
637 ///
638 /// This function appears in FANN >= 1.0.0.
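    ///
    /// For example, training on a single pattern (a sketch; assumes a network `ann` with two
    /// inputs and one output):
    ///
    /// ```notest
    /// let input: [fann_type; 2] = [0.0, 1.0];
    /// let desired: [fann_type; 1] = [1.0];
    /// unsafe { fann_train(ann, input.as_ptr(), desired.as_ptr()) };
    /// ```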
639 pub fn fann_train(ann: *mut fann, input: *const fann_type, desired_output: *const fann_type);
640
641 /// Test with a set of inputs, and a set of desired outputs.
642 /// This operation updates the mean square error, but does not
643 /// change the network in any way.
644 ///
645 /// # See also
646 /// `fann_test_data`, `fann_train`
647 ///
648 /// This function appears in FANN >= 1.0.0.
649 pub fn fann_test(
650 ann: *mut fann,
651 input: *const fann_type,
652 desired_output: *const fann_type,
653 ) -> *mut fann_type;
654
655 /// Reads the mean square error from the network.
656 ///
657 /// This value is calculated during
658 /// training or testing, and can therefore sometimes be a bit off if the weights
659 /// have been changed since the last calculation of the value.
660 ///
661 /// # See also
662 /// `fann_test_data`
663 ///
664 /// This function appears in FANN >= 1.1.0.
665 pub fn fann_get_MSE(ann: *const fann) -> c_float;
666
667 /// The number of fail bits; means the number of output neurons which differ more
668 /// than the bit fail limit (see `fann_get_bit_fail_limit`, `fann_set_bit_fail_limit`).
669 /// The bits are counted in all of the training data, so this number can be higher than
670 /// the number of training data.
671 ///
672 /// This value is reset by `fann_reset_MSE` and updated by all the same functions which also
673 /// update the MSE value (e.g. `fann_test_data`, `fann_train_epoch`)
674 ///
675 /// # See also
676 /// `fann_stopfunc_enum`, `fann_get_MSE`
677 ///
678 /// This function appears in FANN >= 2.0.0
679 pub fn fann_get_bit_fail(ann: *const fann) -> c_uint;
680
681 /// Resets the mean square error from the network.
682 ///
683 /// This function also resets the number of bits that fail.
684 ///
685 /// # See also
686 /// `fann_get_bit_fail_limit`, `fann_get_MSE`
687 ///
688 /// This function appears in FANN >= 1.1.0
689 pub fn fann_reset_MSE(ann: *mut fann);
690
    /// Trains on an entire dataset for a period of time.
692 ///
693 /// This training uses the training algorithm chosen by `fann_set_training_algorithm`,
694 /// and the parameters set for these training algorithms.
695 ///
696 /// # Parameters
697 ///
698 /// * `ann` - The neural network
699 /// * `data` - The data that should be used during training
700 /// * `max_epochs` - The maximum number of epochs the training should continue
701 /// * `epochs_between_reports` - The number of epochs between printing a status report to
702 /// `stdout`. A value of zero means no reports should be printed.
703 /// * `desired_error` - The desired `fann_get_MSE` or `fann_get_bit_fail`, depending on
704 /// which stop function is chosen by `fann_set_train_stop_function`.
705 ///
706 /// Instead of printing out reports every `epochs_between_reports`, a callback function can be
707 /// called (see `fann_set_callback`).
708 ///
709 /// # See also
710 /// `fann_train_on_file`, `fann_train_epoch`
711 ///
712 /// This function appears in FANN >= 1.0.0.
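    ///
    /// For example (a sketch, given a valid `ann` and `data`):
    ///
    /// ```notest
    /// // At most 500 epochs, a report every 10 epochs, stop once the MSE drops below 0.001.
    /// unsafe { fann_train_on_data(ann, data, 500, 10, 0.001) };
    /// ```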
713 pub fn fann_train_on_data(
714 ann: *mut fann,
715 data: *const fann_train_data,
716 max_epochs: c_uint,
717 epochs_between_reports: c_uint,
718 desired_error: c_float,
719 );
720
721 /// Does the same as `fann_train_on_data`, but reads the training data directly from a file.
722 ///
723 /// # See also
724 /// `fann_train_on_data`
725 ///
726 /// This function appears in FANN >= 1.0.0.
727 pub fn fann_train_on_file(
728 ann: *mut fann,
729 filename: *const c_char,
730 max_epochs: c_uint,
731 epochs_between_reports: c_uint,
732 desired_error: c_float,
733 );
734
735 /// Train one epoch with a set of training data.
736 ///
737 /// Train one epoch with the training data stored in `data`. One epoch is where all of
738 /// the training data is considered exactly once.
739 ///
740 /// This function returns the MSE error as it is calculated either before or during
    /// the actual training. This is not the actual MSE after the training epoch, but since
    /// calculating this would require going through the entire training set once more, it is
    /// more than adequate to use this value during training.
744 ///
745 /// The training algorithm used by this function is chosen by the `fann_set_training_algorithm`
746 /// function.
747 ///
748 /// # See also
749 /// `fann_train_on_data`, `fann_test_data`
750 ///
751 /// This function appears in FANN >= 1.2.0.
752 pub fn fann_train_epoch(ann: *mut fann, data: *const fann_train_data) -> c_float;
753
754 /// Tests a set of training data and calculates the MSE for the training data.
755 ///
756 /// This function updates the MSE and the bit fail values.
757 ///
758 /// # See also
759 /// `fann_test`, `fann_get_MSE`, `fann_get_bit_fail`
760 ///
761 /// This function appears in FANN >= 1.2.0.
762 pub fn fann_test_data(ann: *mut fann, data: *const fann_train_data) -> c_float;
763
764 /// Reads a file that stores training data.
765 ///
766 /// The file must be formatted like:
767 ///
768 /// ```text
769 /// num_train_data num_input num_output
770 /// inputdata separated by space
771 /// outputdata separated by space
772 /// .
773 /// .
774 /// .
775 /// inputdata separated by space
776 /// outputdata separated by space
777 /// ```
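    ///
    /// For example, a hypothetical XOR data set with four patterns, two inputs and one output
    /// could be stored as:
    ///
    /// ```text
    /// 4 2 1
    /// 0 0
    /// 0
    /// 0 1
    /// 1
    /// 1 0
    /// 1
    /// 1 1
    /// 0
    /// ```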
778 ///
779 /// # See also
780 /// `fann_train_on_data`, `fann_destroy_train`, `fann_save_train`
781 ///
782 /// This function appears in FANN >= 1.0.0
783 pub fn fann_read_train_from_file(filename: *const c_char) -> *mut fann_train_data;
784
    /// Creates the training data struct from a user supplied function.
    /// As the training data are numbered (data 1, data 2, ...), the user must write
    /// a function that receives the number of the training data set (input, output)
    /// and returns the set.
789 ///
790 /// # Parameters
791 ///
792 /// * `num_data` - The number of training data
793 /// * `num_input` - The number of inputs per training data
    /// * `num_output` - The number of outputs per training data
795 /// * `user_function` - The user supplied function
796 ///
797 /// # Parameters for the user function
798 ///
799 /// * `num` - The number of the training data set
800 /// * `num_input` - The number of inputs per training data
    /// * `num_output` - The number of outputs per training data
802 /// * `input` - The set of inputs
803 /// * `output` - The set of desired outputs
804 ///
805 /// # See also
806 /// `fann_read_train_from_file`, `fann_train_on_data`, `fann_destroy_train`, `fann_save_train`
807 ///
808 /// This function appears in FANN >= 2.1.0
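    ///
    /// Example of a user function (a sketch; it fills every pattern with fixed dummy values):
    ///
    /// ```notest
    /// extern "C" fn fill_pattern(_num: c_uint,
    ///                            num_input: c_uint,
    ///                            num_output: c_uint,
    ///                            input: *mut fann_type,
    ///                            output: *mut fann_type) {
    ///     unsafe {
    ///         for i in 0..num_input as isize {
    ///             *input.offset(i) = 0.0;
    ///         }
    ///         for o in 0..num_output as isize {
    ///             *output.offset(o) = 1.0;
    ///         }
    ///     }
    /// }
    ///
    /// let data = unsafe { fann_create_train_from_callback(100, 2, 1, Some(fill_pattern)) };
    /// ```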
809 pub fn fann_create_train_from_callback(
810 num_data: c_uint,
811 num_input: c_uint,
812 num_output: c_uint,
813 user_function: Option<
814 extern "C" fn(
815 num: c_uint,
816 num_input: c_uint,
817 num_output: c_uint,
818 input: *mut fann_type,
819 output: *mut fann_type,
820 ),
821 >,
822 ) -> *mut fann_train_data;
823
824 /// Destructs the training data and properly deallocates all of the associated data.
825 /// Be sure to call this function when finished using the training data.
826 ///
827 /// This function appears in FANN >= 1.0.0
828 pub fn fann_destroy_train(train_data: *mut fann_train_data);
829
830 /// Shuffles training data, randomizing the order.
831 /// This is recommended for incremental training, while it has no influence during batch
832 /// training.
833 ///
834 /// This function appears in FANN >= 1.1.0.
835 pub fn fann_shuffle_train_data(train_data: *mut fann_train_data);
836
837 /// Scale input and output data based on previously calculated parameters.
838 ///
839 /// # Parameters
840 ///
841 /// * `ann` - ANN for which trained parameters were calculated before
842 /// * `data` - training data that needs to be scaled
843 ///
844 /// # See also
845 /// `fann_descale_train`, `fann_set_scaling_params`
846 ///
847 /// This function appears in FANN >= 2.1.0
848 pub fn fann_scale_train(ann: *mut fann, data: *mut fann_train_data);
849
850 /// Descale input and output data based on previously calculated parameters.
851 ///
852 /// # Parameters
853 ///
854 /// * `ann` - ann for which trained parameters were calculated before
855 /// * `data` - training data that needs to be descaled
856 ///
857 /// # See also
858 /// `fann_scale_train`, `fann_set_scaling_params`
859 ///
860 /// This function appears in FANN >= 2.1.0
861 pub fn fann_descale_train(ann: *mut fann, data: *mut fann_train_data);
862
863 /// Calculate input scaling parameters for future use based on training data.
864 ///
865 /// # Parameters
866 ///
867 /// * `ann` - ANN for which parameters need to be calculated
868 /// * `data` - training data that will be used to calculate scaling parameters
869 /// * `new_input_min` - desired lower bound in input data after scaling (not strictly followed)
870 /// * `new_input_max` - desired upper bound in input data after scaling (not strictly followed)
871 ///
872 /// # See also
873 /// `fann_set_output_scaling_params`
874 ///
875 /// This function appears in FANN >= 2.1.0
876 pub fn fann_set_input_scaling_params(
877 ann: *mut fann,
878 data: *const fann_train_data,
879 new_input_min: c_float,
880 new_input_max: c_float,
881 ) -> c_int;
882
883 /// Calculate output scaling parameters for future use based on training data.
884 ///
885 /// # Parameters
886 ///
887 /// * `ann` - ANN for which parameters need to be calculated
888 /// * `data` - training data that will be used to calculate scaling parameters
889 /// * `new_output_min` - desired lower bound in output data after scaling (not strictly
890 /// followed)
891 /// * `new_output_max` - desired upper bound in output data after scaling (not strictly
892 /// followed)
893 ///
894 /// # See also
895 /// `fann_set_input_scaling_params`
896 ///
897 /// This function appears in FANN >= 2.1.0
898 pub fn fann_set_output_scaling_params(
899 ann: *mut fann,
900 data: *const fann_train_data,
901 new_output_min: c_float,
902 new_output_max: c_float,
903 ) -> c_int;
904
905 /// Calculate input and output scaling parameters for future use based on training data.
906 ///
907 /// # Parameters
908 ///
909 /// * `ann` - ANN for which parameters need to be calculated
910 /// * `data` - training data that will be used to calculate scaling parameters
911 /// * `new_input_min` - desired lower bound in input data after scaling (not strictly followed)
912 /// * `new_input_max` - desired upper bound in input data after scaling (not strictly followed)
913 /// * `new_output_min` - desired lower bound in output data after scaling (not strictly
914 /// followed)
915 /// * `new_output_max` - desired upper bound in output data after scaling (not strictly
916 /// followed)
917 ///
918 /// # See also
919 /// `fann_set_input_scaling_params`, `fann_set_output_scaling_params`
920 ///
921 /// This function appears in FANN >= 2.1.0
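    ///
    /// For example (a sketch, given a valid `ann` and `data`):
    ///
    /// ```notest
    /// unsafe {
    ///     // Compute parameters mapping inputs to roughly [-1, 1] and outputs to roughly [0, 1] ...
    ///     fann_set_scaling_params(ann, data, -1.0, 1.0, 0.0, 1.0);
    ///     // ... then apply them to the training data before training.
    ///     fann_scale_train(ann, data);
    /// }
    /// ```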
922 pub fn fann_set_scaling_params(
923 ann: *mut fann,
924 data: *const fann_train_data,
925 new_input_min: c_float,
926 new_input_max: c_float,
927 new_output_min: c_float,
928 new_output_max: c_float,
929 ) -> c_int;
930
931 /// Clears scaling parameters.
932 ///
933 /// # Parameters
934 ///
935 /// * `ann` - ann for which to clear scaling parameters
936 ///
937 /// This function appears in FANN >= 2.1.0
938 pub fn fann_clear_scaling_params(ann: *mut fann) -> c_int;
939
940 /// Scale data in input vector before feeding it to the ANN based on previously calculated
941 /// parameters.
942 ///
943 /// # Parameters
944 ///
    /// * `ann` - for which scaling parameters were calculated
    /// * `input_vector` - input vector that will be scaled
947 ///
948 /// # See also
949 /// `fann_descale_input`, `fann_scale_output`
950 ///
951 /// This function appears in FANN >= 2.1.0
952 pub fn fann_scale_input(ann: *mut fann, input_vector: *mut fann_type);
953
954 /// Scale data in output vector before feeding it to the ANN based on previously calculated
955 /// parameters.
956 ///
957 /// # Parameters
958 ///
959 /// * `ann` - for which scaling parameters were calculated
960 /// * `output_vector` - output vector that will be scaled
961 ///
962 /// # See also
    /// `fann_descale_output`, `fann_scale_input`
964 ///
965 /// This function appears in FANN >= 2.1.0
966 pub fn fann_scale_output(ann: *mut fann, output_vector: *mut fann_type);
967
968 /// Scale data in input vector after getting it from the ANN based on previously calculated
969 /// parameters.
970 ///
971 /// # Parameters
972 ///
973 /// * `ann` - for which scaling parameters were calculated
974 /// * `input_vector` - input vector that will be descaled
975 ///
976 /// # See also
977 /// `fann_scale_input`, `fann_descale_output`
978 ///
979 /// This function appears in FANN >= 2.1.0
980 pub fn fann_descale_input(ann: *mut fann, input_vector: *mut fann_type);
981
982 /// Scale data in output vector after getting it from the ANN based on previously calculated
983 /// parameters.
984 ///
985 /// # Parameters
986 ///
987 /// * `ann` - for which scaling parameters were calculated
988 /// * `output_vector` - output vector that will be descaled
989 ///
990 /// # See also
991 /// `fann_descale_input`, `fann_scale_output`
992 ///
993 /// This function appears in FANN >= 2.1.0
994 pub fn fann_descale_output(ann: *mut fann, output_vector: *mut fann_type);
995
996 /// Scales the inputs in the training data to the specified range.
997 ///
998 /// # See also
999 /// `fann_scale_output_train_data`, `fann_scale_train_data`
1000 ///
1001 /// This function appears in FANN >= 2.0.0.
1002 pub fn fann_scale_input_train_data(
1003 train_data: *mut fann_train_data,
1004 new_min: fann_type,
1005 new_max: fann_type,
1006 );
1007
1008 /// Scales the outputs in the training data to the specified range.
1009 ///
1010 /// # See also
1011 /// `fann_scale_input_train_data`, `fann_scale_train_data`
1012 ///
1013 /// This function appears in FANN >= 2.0.0.
1014 pub fn fann_scale_output_train_data(
1015 train_data: *mut fann_train_data,
1016 new_min: fann_type,
1017 new_max: fann_type,
1018 );
1019
1020 /// Scales the inputs and outputs in the training data to the specified range.
1021 ///
1022 /// # See also
1023 /// `fann_scale_output_train_data`, `fann_scale_input_train_data`
1024 ///
1025 /// This function appears in FANN >= 2.0.0.
1026 pub fn fann_scale_train_data(
1027 train_data: *mut fann_train_data,
1028 new_min: fann_type,
1029 new_max: fann_type,
1030 );
1031
1032 /// Merges the data from `data1` and `data2` into a new `fann_train_data`.
1033 ///
1034 /// This function appears in FANN >= 1.1.0.
1035 pub fn fann_merge_train_data(
1036 data1: *const fann_train_data,
1037 data2: *const fann_train_data,
1038 ) -> *mut fann_train_data;
1039
1040 /// Returns an exact copy of a `fann_train_data`.
1041 ///
1042 /// This function appears in FANN >= 1.1.0.
1043 pub fn fann_duplicate_train_data(data: *const fann_train_data) -> *mut fann_train_data;
1044
    /// Returns a copy of a subset of the `fann_train_data`, starting at position `pos`
1046 /// and `length` elements forward.
1047 ///
1048 /// ```notest
1049 /// fann_subset_train_data(train_data, 0, fann_length_train_data(train_data))
1050 /// ```
1051 ///
1052 /// will do the same as `fann_duplicate_train_data`.
1053 ///
1054 /// # See also
1055 /// `fann_length_train_data`
1056 ///
1057 /// This function appears in FANN >= 2.0.0.
1058 pub fn fann_subset_train_data(
1059 data: *const fann_train_data,
1060 pos: c_uint,
1061 length: c_uint,
1062 ) -> *mut fann_train_data;
1063
1064 /// Returns the number of training patterns in the `fann_train_data`.
1065 ///
1066 /// This function appears in FANN >= 2.0.0.
1067 pub fn fann_length_train_data(data: *const fann_train_data) -> c_uint;
1068
1069 /// Returns the number of inputs in each of the training patterns in the `fann_train_data`.
1070 ///
1071 /// # See also
1072 /// `fann_num_train_data`, `fann_num_output_train_data`
1073 ///
1074 /// This function appears in FANN >= 2.0.0.
1075 pub fn fann_num_input_train_data(data: *const fann_train_data) -> c_uint;
1076
1077 /// Returns the number of outputs in each of the training patterns in the `fann_train_data`.
1078 ///
1079 /// # See also
1080 /// `fann_num_train_data`, `fann_num_input_train_data`
1081 ///
1082 /// This function appears in FANN >= 2.0.0.
1083 pub fn fann_num_output_train_data(data: *const fann_train_data) -> c_uint;
1084
    /// Save the training structure to a file, with the format specified in
    /// `fann_read_train_from_file`.
1087 ///
1088 /// # Return
1089 ///
1090 /// The function returns 0 on success and -1 on failure.
1091 ///
1092 /// # See also
1093 /// `fann_read_train_from_file`, `fann_save_train_to_fixed`
1094 ///
1095 /// This function appears in FANN >= 1.0.0.
1096 pub fn fann_save_train(data: *mut fann_train_data, filename: *const c_char) -> c_int;
1097
1098 /// Saves the training structure to a fixed point data file.
1099 ///
1100 /// This function is very useful for testing the quality of a fixed point network.
1101 ///
1102 /// # Return
1103 ///
1104 /// The function returns 0 on success and -1 on failure.
1105 ///
1106 /// # See also
1107 /// `fann_save_train`
1108 ///
1109 /// This function appears in FANN >= 1.0.0.
1110 pub fn fann_save_train_to_fixed(
1111 data: *mut fann_train_data,
1112 filename: *const c_char,
1113 decimal_point: c_uint,
1114 ) -> c_int;
1115
1116 /// Return the training algorithm as described by `fann_train_enum`. This training algorithm
1117 /// is used by `fann_train_on_data` and associated functions.
1118 ///
    /// Note that this algorithm is also used during `fann_cascadetrain_on_data`, although only
    /// `FANN_TRAIN_RPROP` and `FANN_TRAIN_QUICKPROP` are allowed during cascade training.
1121 ///
1122 /// The default training algorithm is `FANN_TRAIN_RPROP`.
1123 ///
1124 /// # See also
1125 /// `fann_set_training_algorithm`, `fann_train_enum`
1126 ///
1127 /// This function appears in FANN >= 1.0.0.
1128 pub fn fann_get_training_algorithm(ann: *const fann) -> fann_train_enum;
1129
1130 /// Set the training algorithm.
1131 ///
1132 /// More info available in `fann_get_training_algorithm`.
1133 ///
1134 /// This function appears in FANN >= 1.0.0.
1135 pub fn fann_set_training_algorithm(ann: *mut fann, training_algorithm: fann_train_enum);
1136
1137 /// Return the learning rate.
1138 ///
1139 /// The learning rate is used to determine how aggressive training should be for some of the
1140 /// training algorithms (`FANN_TRAIN_INCREMENTAL`, `FANN_TRAIN_BATCH`, `FANN_TRAIN_QUICKPROP`).
1141 /// Do however note that it is not used in `FANN_TRAIN_RPROP`.
1142 ///
1143 /// The default learning rate is 0.7.
1144 ///
1145 /// # See also
1146 /// `fann_set_learning_rate`, `fann_set_training_algorithm`
1147 ///
1148 /// This function appears in FANN >= 1.0.0.
1149 pub fn fann_get_learning_rate(ann: *const fann) -> c_float;
1150
1151 /// Set the learning rate.
1152 ///
1153 /// More info available in `fann_get_learning_rate`.
1154 ///
1155 /// This function appears in FANN >= 1.0.0.
1156 pub fn fann_set_learning_rate(ann: *mut fann, learning_rate: c_float);
1157
1158 /// Get the learning momentum.
1159 ///
    /// The learning momentum can be used to speed up `FANN_TRAIN_INCREMENTAL` training.
    /// Too high a momentum will, however, not benefit training. Setting momentum to 0 is
    /// the same as not using the momentum parameter. The recommended value of this parameter
    /// is between 0.0 and 1.0.
1164 ///
1165 /// The default momentum is 0.
1166 ///
1167 /// # See also
1168 /// `fann_set_learning_momentum`, `fann_set_training_algorithm`
1169 ///
1170 /// This function appears in FANN >= 2.0.0.
1171 pub fn fann_get_learning_momentum(ann: *const fann) -> c_float;
1172
1173 /// Set the learning momentum.
1174 ///
1175 /// More info available in `fann_get_learning_momentum`.
1176 ///
1177 /// This function appears in FANN >= 2.0.0.
1178 pub fn fann_set_learning_momentum(ann: *mut fann, learning_momentum: c_float);
1179
1180 /// Get the activation function for neuron number `neuron` in layer number `layer`,
1181 /// counting the input layer as layer 0.
1182 ///
1183 /// It is not possible to get activation functions for the neurons in the input layer.
1184 ///
1185 /// Information about the individual activation functions is available at
1186 /// `fann_activationfunc_enum`.
1187 ///
1188 /// # Returns
1189 ///
1190 /// The activation function for the neuron or `FANN_NONE` if the neuron is not defined in the
1191 /// neural network.
1192 ///
1193 /// # See also
1194 /// `fann_set_activation_function_layer`, `fann_set_activation_function_hidden`,
1195 /// `fann_set_activation_function_output`, `fann_set_activation_steepness`,
1196 /// `fann_set_activation_function`
1197 ///
1198 /// This function appears in FANN >= 2.1.0.
1199 pub fn fann_get_activation_function(
1200 ann: *const fann,
1201 layer: c_int,
1202 neuron: c_int,
1203 ) -> fann_activationfunc_enum;
1204
1205 /// Set the activation function for neuron number `neuron` in layer number `layer`,
1206 /// counting the input layer as layer 0.
1207 ///
1208 /// It is not possible to set activation functions for the neurons in the input layer.
1209 ///
    /// When choosing an activation function it is important to note that the activation
    /// functions have different ranges. `FANN_SIGMOID` is e.g. in the 0 to 1 range, while
    /// `FANN_SIGMOID_SYMMETRIC` is in the -1 to 1 range and `FANN_LINEAR` is unbounded.
1213 ///
1214 /// Information about the individual activation functions is available at
1215 /// `fann_activationfunc_enum`.
1216 ///
1217 /// The default activation function is `FANN_SIGMOID_STEPWISE`.
1218 ///
1219 /// # See also
1220 /// `fann_set_activation_function_layer`, `fann_set_activation_function_hidden`,
1221 /// `fann_set_activation_function_output`, `fann_set_activation_steepness`,
1222 /// `fann_get_activation_function`
1223 ///
1224 /// This function appears in FANN >= 2.0.0.
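    ///
    /// For example, to give the single output neuron of a three-layer network (layers 0, 1, 2)
    /// a symmetric sigmoid (a sketch):
    ///
    /// ```notest
    /// unsafe { fann_set_activation_function(ann, FANN_SIGMOID_SYMMETRIC, 2, 0) };
    /// ```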
1225 pub fn fann_set_activation_function(
1226 ann: *mut fann,
1227 activation_function: fann_activationfunc_enum,
1228 layer: c_int,
1229 neuron: c_int,
1230 );
1231
1232 /// Set the activation function for all the neurons in the layer number `layer`,
1233 /// counting the input layer as layer 0.
1234 ///
1235 /// It is not possible to set activation functions for the neurons in the input layer.
1236 ///
1237 /// # See also
1238 /// `fann_set_activation_function`, `fann_set_activation_function_hidden`,
1239 /// `fann_set_activation_function_output`, `fann_set_activation_steepness_layer`
1240 ///
1241 /// This function appears in FANN >= 2.0.0.
1242 pub fn fann_set_activation_function_layer(
1243 ann: *mut fann,
1244 activation_function: fann_activationfunc_enum,
1245 layer: c_int,
1246 );
1247
1248 /// Set the activation function for all of the hidden layers.
1249 ///
1250 /// # See also
1251 /// `fann_set_activation_function`, `fann_set_activation_function_layer`,
1252 /// `fann_set_activation_function_output`, `fann_set_activation_steepness_hidden`
1253 ///
1254 /// This function appears in FANN >= 1.0.0.
1255 pub fn fann_set_activation_function_hidden(
1256 ann: *mut fann,
1257 activation_function: fann_activationfunc_enum,
1258 );
1259
1260 /// Set the activation function for the output layer.
1261 ///
1262 /// # See also
1263 /// `fann_set_activation_function`, `fann_set_activation_function_layer`,
1264 /// `fann_set_activation_function_hidden`, `fann_set_activation_steepness_output`
1265 ///
1266 /// This function appears in FANN >= 1.0.0.
1267 pub fn fann_set_activation_function_output(
1268 ann: *mut fann,
1269 activation_function: fann_activationfunc_enum,
1270 );
1271
1272 /// Get the activation steepness for neuron number `neuron` in layer number `layer`,
1273 /// counting the input layer as layer 0.
1274 ///
1275 /// It is not possible to get activation steepness for the neurons in the input layer.
1276 ///
1277 /// The steepness of an activation function says something about how fast the activation
    /// function goes from the minimum to the maximum. A high value for the activation steepness
    /// will also give more aggressive training.
1280 ///
1281 /// When training neural networks where the output values should be at the extremes (usually 0
1282 /// and 1, depending on the activation function), a steep activation function can be used (e.g.
1283 /// 1.0).
1284 ///
1285 /// The default activation steepness is 0.5.
1286 ///
1287 /// # Returns
1288 /// The activation steepness for the neuron or -1 if the neuron is not defined in the neural
1289 /// network.
1290 ///
    /// # See also
1292 /// `fann_set_activation_steepness_layer`, `fann_set_activation_steepness_hidden`,
1293 /// `fann_set_activation_steepness_output`, `fann_set_activation_function`,
1294 /// `fann_set_activation_steepness`
1295 ///
1296 /// This function appears in FANN >= 2.1.0
1297 pub fn fann_get_activation_steepness(
1298 ann: *const fann,
1299 layer: c_int,
1300 neuron: c_int,
1301 ) -> fann_type;
1302
1303 /// Set the activation steepness for neuron number `neuron` in layer number `layer`,
1304 /// counting the input layer as layer 0.
1305 ///
1306 /// It is not possible to set activation steepness for the neurons in the input layer.
1307 ///
1308 /// The steepness of an activation function says something about how fast the activation
    /// function goes from the minimum to the maximum. A high value for the activation steepness
    /// will also give more aggressive training.
1311 ///
1312 /// When training neural networks where the output values should be at the extremes (usually 0
1313 /// and 1, depending on the activation function), a steep activation function can be used (e.g.
1314 /// 1.0).
1315 ///
1316 /// The default activation steepness is 0.5.
1317 ///
1318 /// # See also
1319 /// `fann_set_activation_steepness_layer`, `fann_set_activation_steepness_hidden`,
1320 /// `fann_set_activation_steepness_output`, `fann_set_activation_function`,
1321 /// `fann_get_activation_steepness`
1322 ///
1323 /// This function appears in FANN >= 2.0.0.
1324 pub fn fann_set_activation_steepness(
1325 ann: *mut fann,
1326 steepness: fann_type,
1327 layer: c_int,
1328 neuron: c_int,
1329 );
1330
1331 /// Set the activation steepness for all neurons in layer number `layer`,
1332 /// counting the input layer as layer 0.
1333 ///
1334 /// It is not possible to set activation steepness for the neurons in the input layer.
1335 ///
1336 /// # See also
1337 /// `fann_set_activation_steepness`, `fann_set_activation_steepness_hidden`,
1338 /// `fann_set_activation_steepness_output`, `fann_set_activation_function_layer`
1339 ///
1340 /// This function appears in FANN >= 2.0.0.
1341 pub fn fann_set_activation_steepness_layer(ann: *mut fann, steepness: fann_type, layer: c_int);
1342
    /// Set the activation steepness for all of the hidden layers.
    ///
    /// # See also
1346 /// `fann_set_activation_steepness`, `fann_set_activation_steepness_layer`,
1347 /// `fann_set_activation_steepness_output`, `fann_set_activation_function_hidden`
1348 ///
1349 /// This function appears in FANN >= 1.2.0.
1350 pub fn fann_set_activation_steepness_hidden(ann: *mut fann, steepness: fann_type);
1351
    /// Set the activation steepness for the output layer.
1353 ///
1354 /// # See also
1355 /// `fann_set_activation_steepness`, `fann_set_activation_steepness_layer`,
1356 /// `fann_set_activation_steepness_hidden`, `fann_set_activation_function_output`
1357 ///
1358 /// This function appears in FANN >= 1.2.0.
1359 pub fn fann_set_activation_steepness_output(ann: *mut fann, steepness: fann_type);
1360
1361 /// Returns the error function used during training.
1362 ///
1363 /// The error functions are described further in `fann_errorfunc_enum`.
1364 ///
    /// The default error function is `FANN_ERRORFUNC_TANH`.
1366 ///
1367 /// # See also
1368 /// `fann_set_train_error_function`
1369 ///
1370 /// This function appears in FANN >= 1.2.0.
1371 pub fn fann_get_train_error_function(ann: *const fann) -> fann_errorfunc_enum;
1372
1373 /// Set the error function used during training.
1374 ///
1375 /// The error functions are described further in `fann_errorfunc_enum`.
1376 ///
1377 /// # See also
1378 /// `fann_get_train_error_function`
1379 ///
1380 /// This function appears in FANN >= 1.2.0.
1381 pub fn fann_set_train_error_function(ann: *mut fann, train_error_function: fann_errorfunc_enum);
1382
    /// Returns the stop function used during training.
1384 ///
1385 /// The stop function is described further in `fann_stopfunc_enum`.
1386 ///
1387 /// The default stop function is `FANN_STOPFUNC_MSE`.
1388 ///
1389 /// # See also
    /// `fann_set_train_stop_function`, `fann_get_bit_fail_limit`
1391 ///
1392 /// This function appears in FANN >= 2.0.0.
1393 pub fn fann_get_train_stop_function(ann: *const fann) -> fann_stopfunc_enum;
1394
1395 /// Set the stop function used during training.
1396 ///
1399 /// The stop function is described further in `fann_stopfunc_enum`.
1400 ///
1401 /// # See also
1402 /// `fann_get_train_stop_function`
1403 ///
1404 /// This function appears in FANN >= 2.0.0.
1405 pub fn fann_set_train_stop_function(ann: *mut fann, train_stop_function: fann_stopfunc_enum);
1406
1407 /// Returns the bit fail limit used during training.
1408 ///
1409 /// The bit fail limit is used during training where the `fann_stopfunc_enum` is set to
1410 /// `FANN_STOPFUNC_BIT`.
1411 ///
1412 /// The limit is the maximum accepted difference between the desired output and the actual
1413 /// output during training. Each output that diverges more than this limit is counted as an
    /// error bit. This difference is divided by two when dealing with symmetric activation
    /// functions, so that symmetric and non-symmetric activation functions can use the same limit.
1416 ///
1417 /// The default bit fail limit is 0.35.
1418 ///
1419 /// # See also
1420 /// `fann_set_bit_fail_limit`
1421 ///
1422 /// This function appears in FANN >= 2.0.0.
1423 pub fn fann_get_bit_fail_limit(ann: *const fann) -> fann_type;
1424
1425 /// Set the bit fail limit used during training.
1426 ///
1427 /// # See also
1428 /// `fann_get_bit_fail_limit`
1429 ///
1430 /// This function appears in FANN >= 2.0.0.
1431 pub fn fann_set_bit_fail_limit(ann: *mut fann, bit_fail_limit: fann_type);
1432
1433 /// Sets the callback function for use during training.
1434 ///
1435 /// See `fann_callback_type` for more information about the callback function.
1436 ///
1437 /// The default callback function simply prints out some status information.
1438 ///
1439 /// This function appears in FANN >= 2.0.0.
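    ///
    /// For example, with the `cb` function from the `fann_callback_type` example (a sketch):
    ///
    /// ```notest
    /// unsafe { fann_set_callback(ann, Some(cb)) };
    /// ```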
1440 pub fn fann_set_callback(ann: *mut fann, callback: fann_callback_type);
1441
1442 /// The decay is a small negative valued number which is the factor that the weights
1443 /// should become smaller in each iteration during quickprop training. This is used
1444 /// to make sure that the weights do not become too high during training.
1445 ///
1446 /// The default decay is -0.0001.
1447 ///
1448 /// # See also
1449 /// `fann_set_quickprop_decay`
1450 ///
1451 /// This function appears in FANN >= 1.2.0.
1452 pub fn fann_get_quickprop_decay(ann: *const fann) -> c_float;
1453
1454 /// Sets the quickprop decay factor.
1455 ///
1456 /// # See also
1457 /// `fann_get_quickprop_decay`
1458 ///
1459 /// This function appears in FANN >= 1.2.0.
1460 pub fn fann_set_quickprop_decay(ann: *mut fann, quickprop_decay: c_float);
1461
1462 /// The mu factor is used to increase and decrease the step size during quickprop training.
1463 /// The mu factor should always be above 1, since it would otherwise decrease the step size
1464 /// when it was supposed to increase it.
1465 ///
1466 /// The default mu factor is 1.75.
1467 ///
1468 /// # See also
1469 /// `fann_set_quickprop_mu`
1470 ///
1471 /// This function appears in FANN >= 1.2.0.
1472 pub fn fann_get_quickprop_mu(ann: *const fann) -> c_float;
1473
1474 /// Sets the quickprop mu factor.
1475 ///
1476 /// # See also
1477 /// `fann_get_quickprop_mu`
1478 ///
1479 /// This function appears in FANN >= 1.2.0.
1480 pub fn fann_set_quickprop_mu(ann: *mut fann, quickprop_mu: c_float);
1481
1482 /// The increase factor is a value larger than 1, which is used to
1483 /// increase the step size during RPROP training.
1484 ///
1485 /// The default increase factor is 1.2.
1486 ///
1487 /// # See also
1488 /// `fann_set_rprop_increase_factor`
1489 ///
1490 /// This function appears in FANN >= 1.2.0.
1491 pub fn fann_get_rprop_increase_factor(ann: *const fann) -> c_float;
1492
1493 /// The increase factor used during RPROP training.
1494 ///
1495 /// # See also
1496 /// `fann_get_rprop_increase_factor`
1497 ///
1498 /// This function appears in FANN >= 1.2.0.
1499 pub fn fann_set_rprop_increase_factor(ann: *mut fann, rprop_increase_factor: c_float);
1500
1501 /// The decrease factor is a value smaller than 1, which is used to decrease the step size
1502 /// during RPROP training.
1503 ///
1504 /// The default decrease factor is 0.5.
1505 ///
1506 /// # See also
1507 /// `fann_set_rprop_decrease_factor`
1508 ///
1509 /// This function appears in FANN >= 1.2.0.
1510 pub fn fann_get_rprop_decrease_factor(ann: *const fann) -> c_float;
1511
1512 /// The decrease factor is a value smaller than 1, which is used to decrease the step size
1513 /// during RPROP training.
1514 ///
1515 /// # See also
1516 /// `fann_get_rprop_decrease_factor`
1517 ///
1518 /// This function appears in FANN >= 1.2.0.
1519 pub fn fann_set_rprop_decrease_factor(ann: *mut fann, rprop_decrease_factor: c_float);
1520
/// The minimum step size is a small positive number that determines how small the step size
/// is allowed to become during RPROP training.
///
/// The default delta min is 0.0.
1525 ///
1526 /// # See also
1527 /// `fann_set_rprop_delta_min`
1528 ///
1529 /// This function appears in FANN >= 1.2.0.
1530 pub fn fann_get_rprop_delta_min(ann: *const fann) -> c_float;
1531
/// Sets the minimum step size used during RPROP training.
1534 ///
1535 /// # See also
1536 /// `fann_get_rprop_delta_min`
1537 ///
1538 /// This function appears in FANN >= 1.2.0.
1539 pub fn fann_set_rprop_delta_min(ann: *mut fann, rprop_delta_min: c_float);
1540
/// The maximum step size is a positive number that determines how large the step size is
/// allowed to become during RPROP training.
1543 ///
1544 /// The default delta max is 50.0.
1545 ///
1546 /// # See also
1547 /// `fann_set_rprop_delta_max`, `fann_get_rprop_delta_min`
1548 ///
1549 /// This function appears in FANN >= 1.2.0.
1550 pub fn fann_get_rprop_delta_max(ann: *const fann) -> c_float;
1551
/// Sets the maximum step size used during RPROP training.
1554 ///
1555 /// # See also
1556 /// `fann_get_rprop_delta_max`, `fann_get_rprop_delta_min`
1557 ///
1558 /// This function appears in FANN >= 1.2.0.
1559 pub fn fann_set_rprop_delta_max(ann: *mut fann, rprop_delta_max: c_float);
1560
/// The initial step size is a positive number that determines the step size used at the start of
/// RPROP training.
1562 ///
1563 /// The default delta zero is 0.1.
1564 ///
1565 /// # See also
1566 /// `fann_set_rprop_delta_zero`, `fann_get_rprop_delta_min`, `fann_get_rprop_delta_max`
1567 ///
1568 /// This function appears in FANN >= 2.1.0.
1569 pub fn fann_get_rprop_delta_zero(ann: *const fann) -> c_float;
1570
/// Sets the initial step size used during RPROP training.
///
/// # See also
/// `fann_get_rprop_delta_zero`
1575 ///
1576 /// This function appears in FANN >= 2.1.0.
pub fn fann_set_rprop_delta_zero(ann: *mut fann, rprop_delta_zero: c_float);
1578
/// Trains on an entire dataset for a period of time, using the Cascade2 training algorithm.
1580 /// This algorithm adds neurons to the neural network while training, which means that it
1581 /// needs to start with an ANN without any hidden layers. The neural network should also use
1582 /// shortcut connections, so `fann_create_shortcut` should be used to create the ANN like this:
1583 ///
/// ```notest
/// let ann = fann_create_shortcut(2,
///                                fann_num_input_train_data(train_data),
///                                fann_num_output_train_data(train_data));
/// ```
1589 ///
1590 /// This training uses the parameters set using `fann_set_cascade_...`, but it also uses
/// another training algorithm as its internal training algorithm. This algorithm can be set to
1592 /// either `FANN_TRAIN_RPROP` or `FANN_TRAIN_QUICKPROP` by `fann_set_training_algorithm`, and
1593 /// the parameters set for these training algorithms will also affect the cascade training.
1594 ///
1595 /// # Parameters
1596 ///
1597 /// * `ann` - The neural network
1598 /// * `data` - The data that should be used during training
/// * `max_neurons` - The maximum number of neurons to be added to the ANN
1600 /// * `neurons_between_reports` - The number of neurons between printing a status report to
1601 /// stdout. A value of zero means no reports should be printed.
1602 /// * `desired_error` - The desired `fann_get_MSE` or `fann_get_bit_fail`, depending
1603 /// on which stop function is chosen by `fann_set_train_stop_function`.
1604 ///
/// Instead of printing out reports every `neurons_between_reports` neurons, a callback
/// function can be called (see `fann_set_callback`).
1607 ///
1608 /// # See also
1609 /// `fann_train_on_data`, `fann_cascadetrain_on_file`
1610 ///
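/// # Example
///
/// A sketch of a complete cascade training run (not from the FANN documentation); the file name
/// is hypothetical, and it assumes the `fann_read_train_from_file` and `fann_destroy_train`
/// bindings are in scope:
///
/// ```notest
/// use std::ffi::CString;
///
/// let path = CString::new("path/to/train.data").unwrap();
/// unsafe {
///     let data = fann_read_train_from_file(path.as_ptr());
///     let ann = fann_create_shortcut(2,
///                                    fann_num_input_train_data(data),
///                                    fann_num_output_train_data(data));
///     // Use RPROP as the internal training algorithm and add up to 30 neurons.
///     fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
///     fann_cascadetrain_on_data(ann, data, 30, 1, 0.001);
///     fann_destroy_train(data);
///     fann_destroy(ann);
/// }
/// ```
///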
1611 /// This function appears in FANN >= 2.0.0.
1612 pub fn fann_cascadetrain_on_data(
1613 ann: *mut fann,
1614 data: *const fann_train_data,
1615 max_neurons: c_uint,
1616 neurons_between_reports: c_uint,
1617 desired_error: c_float,
1618 );
1619
1620 /// Does the same as `fann_cascadetrain_on_data`, but reads the training data directly from a
1621 /// file.
1622 ///
1623 /// # See also
1624 /// `fann_cascadetrain_on_data`
1625 ///
1626 /// This function appears in FANN >= 2.0.0.
1627 pub fn fann_cascadetrain_on_file(
1628 ann: *mut fann,
1629 filename: *const c_char,
1630 max_neurons: c_uint,
1631 neurons_between_reports: c_uint,
1632 desired_error: c_float,
1633 );
1634
1635 /// The cascade output change fraction is a number between 0 and 1 determining how large a
1636 /// fraction the `fann_get_MSE` value should change within
1637 /// `fann_get_cascade_output_stagnation_epochs` during training of the output connections, in
1638 /// order for the training not to stagnate. If the training stagnates, the training of the
1639 /// output connections will be ended and new candidates will be prepared.
1640 ///
1641 /// This means:
1642 /// If the MSE does not change by a fraction of `fann_get_cascade_output_change_fraction` during
1643 /// a period of `fann_get_cascade_output_stagnation_epochs`, the training of the output
1644 /// connections is stopped because the training has stagnated.
1645 ///
1646 /// If the cascade output change fraction is low, the output connections will be trained more
1647 /// and if the fraction is high they will be trained less.
1648 ///
1649 /// The default cascade output change fraction is 0.01, which is equivalent to a 1% change in
1650 /// MSE.
1651 ///
1652 /// # See also
1653 /// `fann_set_cascade_output_change_fraction`, `fann_get_MSE`,
1654 /// `fann_get_cascade_output_stagnation_epochs`
1655 ///
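/// # Example
///
/// A possible way to tune the stagnation criterion (an illustrative sketch, assuming `ann` is a
/// valid network):
///
/// ```notest
/// unsafe {
///     // Require at least a 0.5% change in MSE within 20 epochs; otherwise the
///     // training of the output connections is considered stagnated.
///     fann_set_cascade_output_change_fraction(ann, 0.005);
///     fann_set_cascade_output_stagnation_epochs(ann, 20);
/// }
/// ```
///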
1656 /// This function appears in FANN >= 2.0.0.
1657 pub fn fann_get_cascade_output_change_fraction(ann: *const fann) -> c_float;
1658
1659 /// Sets the cascade output change fraction.
1660 ///
1661 /// # See also
1662 /// `fann_get_cascade_output_change_fraction`
1663 ///
1664 /// This function appears in FANN >= 2.0.0.
1665 pub fn fann_set_cascade_output_change_fraction(
1666 ann: *mut fann,
1667 cascade_output_change_fraction: c_float,
1668 );
1669
1670 /// The number of cascade output stagnation epochs determines the number of epochs training is
1671 /// allowed to continue without changing the MSE by a fraction of
1672 /// `fann_get_cascade_output_change_fraction`.
1673 ///
1674 /// See more info about this parameter in `fann_get_cascade_output_change_fraction`.
1675 ///
1676 /// The default number of cascade output stagnation epochs is 12.
1677 ///
1678 /// # See also
1679 /// `fann_set_cascade_output_stagnation_epochs`, `fann_get_cascade_output_change_fraction`
1680 ///
1681 /// This function appears in FANN >= 2.0.0.
1682 pub fn fann_get_cascade_output_stagnation_epochs(ann: *const fann) -> c_uint;
1683
1684 /// Sets the number of cascade output stagnation epochs.
1685 ///
1686 /// # See also
1687 /// `fann_get_cascade_output_stagnation_epochs`
1688 ///
1689 /// This function appears in FANN >= 2.0.0.
1690 pub fn fann_set_cascade_output_stagnation_epochs(
1691 ann: *mut fann,
1692 cascade_output_stagnation_epochs: c_uint,
1693 );
1694
1695 /// The cascade candidate change fraction is a number between 0 and 1 determining how large a
1696 /// fraction the `fann_get_MSE` value should change within
1697 /// `fann_get_cascade_candidate_stagnation_epochs` during training of the candidate neurons, in
1698 /// order for the training not to stagnate. If the training stagnates, the training of the
1699 /// candidate neurons will be ended and the best candidate will be selected.
1700 ///
1701 /// This means:
1702 /// If the MSE does not change by a fraction of `fann_get_cascade_candidate_change_fraction`
1703 /// during a period of `fann_get_cascade_candidate_stagnation_epochs`, the training of the
1704 /// candidate neurons is stopped because the training has stagnated.
1705 ///
1706 /// If the cascade candidate change fraction is low, the candidate neurons will be trained more
1707 /// and if the fraction is high they will be trained less.
1708 ///
1709 /// The default cascade candidate change fraction is 0.01, which is equivalent to a 1% change in
1710 /// MSE.
1711 ///
1712 /// # See also
1713 /// `fann_set_cascade_candidate_change_fraction`, `fann_get_MSE`,
1714 /// `fann_get_cascade_candidate_stagnation_epochs`
1715 ///
1716 /// This function appears in FANN >= 2.0.0.
1717 pub fn fann_get_cascade_candidate_change_fraction(ann: *const fann) -> c_float;
1718
1719 /// Sets the cascade candidate change fraction.
1720 ///
1721 /// # See also
1722 /// `fann_get_cascade_candidate_change_fraction`
1723 ///
1724 /// This function appears in FANN >= 2.0.0.
1725 pub fn fann_set_cascade_candidate_change_fraction(
1726 ann: *mut fann,
1727 cascade_candidate_change_fraction: c_float,
1728 );
1729
1730 /// The number of cascade candidate stagnation epochs determines the number of epochs training
1731 /// is allowed to continue without changing the MSE by a fraction of
1732 /// `fann_get_cascade_candidate_change_fraction`.
1733 ///
1734 /// See more info about this parameter in `fann_get_cascade_candidate_change_fraction`.
1735 ///
1736 /// The default number of cascade candidate stagnation epochs is 12.
1737 ///
1738 /// # See also
1739 /// `fann_set_cascade_candidate_stagnation_epochs`, `fann_get_cascade_candidate_change_fraction`
1740 ///
1741 /// This function appears in FANN >= 2.0.0.
1742 pub fn fann_get_cascade_candidate_stagnation_epochs(ann: *const fann) -> c_uint;
1743
1744 /// Sets the number of cascade candidate stagnation epochs.
1745 ///
1746 /// # See also
1747 /// `fann_get_cascade_candidate_stagnation_epochs`
1748 ///
1749 /// This function appears in FANN >= 2.0.0.
1750 pub fn fann_set_cascade_candidate_stagnation_epochs(
1751 ann: *mut fann,
1752 cascade_candidate_stagnation_epochs: c_uint,
1753 );
1754
1755 /// The weight multiplier is a parameter which is used to multiply the weights from the
1756 /// candidate neuron before adding the neuron to the neural network. This parameter is usually
1757 /// between 0 and 1, and is used to make the training a bit less aggressive.
1758 ///
/// The default weight multiplier is 0.4.
1760 ///
1761 /// # See also
1762 /// `fann_set_cascade_weight_multiplier`
1763 ///
1764 /// This function appears in FANN >= 2.0.0.
1765 pub fn fann_get_cascade_weight_multiplier(ann: *const fann) -> fann_type;
1766
1767 /// Sets the weight multiplier.
1768 ///
1769 /// # See also
1770 /// `fann_get_cascade_weight_multiplier`
1771 ///
1772 /// This function appears in FANN >= 2.0.0.
1773 pub fn fann_set_cascade_weight_multiplier(ann: *mut fann, cascade_weight_multiplier: fann_type);
1774
/// The candidate limit is a limit on how much the candidate neuron may be trained.
/// It limits the proportion between the MSE and the candidate score.
///
/// Set this to a lower value to avoid overfitting, and to a higher value if overfitting is
/// not a problem.
///
/// The default candidate limit is 1000.0.
1782 ///
1783 /// # See also
1784 /// `fann_set_cascade_candidate_limit`
1785 ///
1786 /// This function appears in FANN >= 2.0.0.
1787 pub fn fann_get_cascade_candidate_limit(ann: *const fann) -> fann_type;
1788
1789 /// Sets the candidate limit.
1790 ///
1791 /// # See also
1792 /// `fann_get_cascade_candidate_limit`
1793 ///
1794 /// This function appears in FANN >= 2.0.0.
1795 pub fn fann_set_cascade_candidate_limit(ann: *mut fann, cascade_candidate_limit: fann_type);
1796
1797 /// The maximum out epochs determines the maximum number of epochs the output connections
1798 /// may be trained after adding a new candidate neuron.
1799 ///
/// The default max out epochs is 150.
1801 ///
1802 /// # See also
1803 /// `fann_set_cascade_max_out_epochs`
1804 ///
1805 /// This function appears in FANN >= 2.0.0.
1806 pub fn fann_get_cascade_max_out_epochs(ann: *const fann) -> c_uint;
1807
1808 /// Sets the maximum out epochs.
1809 ///
1810 /// # See also
1811 /// `fann_get_cascade_max_out_epochs`
1812 ///
1813 /// This function appears in FANN >= 2.0.0.
1814 pub fn fann_set_cascade_max_out_epochs(ann: *mut fann, cascade_max_out_epochs: c_uint);
1815
1816 /// The maximum candidate epochs determines the maximum number of epochs the input
1817 /// connections to the candidates may be trained before adding a new candidate neuron.
1818 ///
1819 /// The default max candidate epochs is 150.
1820 ///
1821 /// # See also
1822 /// `fann_set_cascade_max_cand_epochs`
1823 ///
1824 /// This function appears in FANN >= 2.0.0.
1825 pub fn fann_get_cascade_max_cand_epochs(ann: *const fann) -> c_uint;
1826
1827 /// Sets the max candidate epochs.
1828 ///
1829 /// # See also
1830 /// `fann_get_cascade_max_cand_epochs`
1831 ///
1832 /// This function appears in FANN >= 2.0.0.
1833 pub fn fann_set_cascade_max_cand_epochs(ann: *mut fann, cascade_max_cand_epochs: c_uint);
1834
1835 /// The number of candidates used during training (calculated by multiplying
1836 /// `fann_get_cascade_activation_functions_count`,
1837 /// `fann_get_cascade_activation_steepnesses_count` and
1838 /// `fann_get_cascade_num_candidate_groups`).
1839 ///
/// The actual candidates are defined by the `fann_get_cascade_activation_functions` and
1841 /// `fann_get_cascade_activation_steepnesses` arrays. These arrays define the activation
1842 /// functions and activation steepnesses used for the candidate neurons. If there are 2
1843 /// activation functions in the activation function array and 3 steepnesses in the steepness
1844 /// array, then there will be 2x3=6 different candidates which will be trained. These 6
1845 /// different candidates can be copied into several candidate groups, where the only difference
1846 /// between these groups is the initial weights. If the number of groups is set to 2, then the
1847 /// number of candidate neurons will be 2x3x2=12. The number of candidate groups is defined by
1848 /// `fann_set_cascade_num_candidate_groups`.
1849 ///
/// The default number of candidates is 6 x 4 x 2 = 48.
1851 ///
1852 /// # See also
1853 /// `fann_get_cascade_activation_functions`, `fann_get_cascade_activation_functions_count`,
1854 /// `fann_get_cascade_activation_steepnesses`, `fann_get_cascade_activation_steepnesses_count`,
1855 /// `fann_get_cascade_num_candidate_groups`
1856 ///
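/// # Example
///
/// An illustrative sketch of the calculation described above (not from the FANN documentation),
/// assuming `ann` is a valid network:
///
/// ```notest
/// unsafe {
///     let funcs = [FANN_SIGMOID_SYMMETRIC, FANN_GAUSSIAN_SYMMETRIC];
///     let steeps: [fann_type; 3] = [0.25, 0.5, 0.75];
///     fann_set_cascade_activation_functions(ann, funcs.as_ptr(), funcs.len() as c_uint);
///     fann_set_cascade_activation_steepnesses(ann, steeps.as_ptr(), steeps.len() as c_uint);
///     fann_set_cascade_num_candidate_groups(ann, 2);
///     // 2 activation functions x 3 steepnesses x 2 groups = 12 candidates.
///     assert_eq!(12, fann_get_cascade_num_candidates(ann));
/// }
/// ```
///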
1857 /// This function appears in FANN >= 2.0.0.
1858 pub fn fann_get_cascade_num_candidates(ann: *const fann) -> c_uint;
1859
1860 /// The number of activation functions in the `fann_get_cascade_activation_functions` array.
1861 ///
1862 /// The default number of activation functions is 6.
1863 ///
1864 /// # See also
1865 /// `fann_get_cascade_activation_functions`, `fann_set_cascade_activation_functions`
1866 ///
1867 /// This function appears in FANN >= 2.0.0.
1868 pub fn fann_get_cascade_activation_functions_count(ann: *const fann) -> c_uint;
1869
1870 /// The cascade activation functions array is an array of the different activation functions
1871 /// used by the candidates.
1872 ///
1873 /// See `fann_get_cascade_num_candidates` for a description of which candidate neurons will be
1874 /// generated by this array.
1875 ///
/// The default activation functions are {`FANN_SIGMOID`, `FANN_SIGMOID_SYMMETRIC`,
1877 /// `FANN_GAUSSIAN`, `FANN_GAUSSIAN_SYMMETRIC`, `FANN_ELLIOTT`, `FANN_ELLIOTT_SYMMETRIC`,
1878 /// `FANN_SIN_SYMMETRIC`, `FANN_COS_SYMMETRIC`, `FANN_SIN`, `FANN_COS`}
1879 ///
1880 /// # See also
1881 /// `fann_get_cascade_activation_functions_count`, `fann_set_cascade_activation_functions`,
1882 /// `fann_activationfunc_enum`
1883 ///
1884 /// This function appears in FANN >= 2.0.0.
1885 pub fn fann_get_cascade_activation_functions(ann: *const fann)
1886 -> *mut fann_activationfunc_enum;
1887
1888 /// Sets the array of cascade candidate activation functions. The array must be just as long
1889 /// as defined by the count.
1890 ///
1891 /// See `fann_get_cascade_num_candidates` for a description of which candidate neurons will be
1892 /// generated by this array.
1893 ///
1894 /// # See also
/// `fann_get_cascade_activation_functions_count`, `fann_get_cascade_activation_functions`
1896 ///
1897 /// This function appears in FANN >= 2.0.0.
1898 pub fn fann_set_cascade_activation_functions(
1899 ann: *mut fann,
1900 cascade_activation_functions: *const fann_activationfunc_enum,
1901 cascade_activation_functions_count: c_uint,
1902 );
1903
/// The number of activation steepnesses in the `fann_get_cascade_activation_steepnesses` array.
1905 ///
1906 /// The default number of activation steepnesses is 4.
1907 ///
1908 /// # See also
/// `fann_get_cascade_activation_steepnesses`, `fann_set_cascade_activation_steepnesses`
1910 ///
1911 /// This function appears in FANN >= 2.0.0.
1912 pub fn fann_get_cascade_activation_steepnesses_count(ann: *const fann) -> c_uint;
1913
/// The cascade activation steepnesses array is an array of the different activation steepnesses
/// used by the candidates.
1916 ///
1917 /// See `fann_get_cascade_num_candidates` for a description of which candidate neurons will be
1918 /// generated by this array.
1919 ///
/// The default activation steepnesses are {0.25, 0.50, 0.75, 1.00}.
1921 ///
1922 /// # See also
1923 /// `fann_set_cascade_activation_steepnesses`, `fann_get_cascade_activation_steepnesses_count`
1924 ///
1925 /// This function appears in FANN >= 2.0.0.
1926 pub fn fann_get_cascade_activation_steepnesses(ann: *const fann) -> *mut fann_type;
1927
1928 /// Sets the array of cascade candidate activation steepnesses. The array must be just as long
1929 /// as defined by the count.
1930 ///
1931 /// See `fann_get_cascade_num_candidates` for a description of which candidate neurons will be
1932 /// generated by this array.
1933 ///
1934 /// # See also
1935 /// `fann_get_cascade_activation_steepnesses`, `fann_get_cascade_activation_steepnesses_count`
1936 ///
1937 /// This function appears in FANN >= 2.0.0.
1938 pub fn fann_set_cascade_activation_steepnesses(
1939 ann: *mut fann,
1940 cascade_activation_steepnesses: *const fann_type,
1941 cascade_activation_steepnesses_count: c_uint,
1942 );
1943
1944 /// The number of candidate groups is the number of groups of identical candidates which will be
1945 /// used during training.
1946 ///
1947 /// This number can be used to have more candidates without having to define new parameters for
1948 /// the candidates.
1949 ///
1950 /// See `fann_get_cascade_num_candidates` for a description of which candidate neurons will be
1951 /// generated by this parameter.
1952 ///
1953 /// The default number of candidate groups is 2.
1954 ///
1955 /// # See also
1956 /// `fann_set_cascade_num_candidate_groups`
1957 ///
1958 /// This function appears in FANN >= 2.0.0.
1959 pub fn fann_get_cascade_num_candidate_groups(ann: *const fann) -> c_uint;
1960
1961 /// Sets the number of candidate groups.
1962 ///
1963 /// # See also
1964 /// `fann_get_cascade_num_candidate_groups`
1965 ///
1966 /// This function appears in FANN >= 2.0.0.
1967 pub fn fann_set_cascade_num_candidate_groups(
1968 ann: *mut fann,
1969 cascade_num_candidate_groups: c_uint,
1970 );
1971
1972 /// Constructs a backpropagation neural network from a configuration file, which has been saved
1973 /// by `fann_save`.
1974 ///
1975 /// # See also
1976 /// `fann_save`, `fann_save_to_fixed`
1977 ///
1978 /// This function appears in FANN >= 1.0.0.
1979 pub fn fann_create_from_file(configuration_file: *const c_char) -> *mut fann;
1980
1981 /// Save the entire network to a configuration file.
1982 ///
1983 /// The configuration file contains all information about the neural network and enables
1984 /// `fann_create_from_file` to create an exact copy of the neural network and all of the
1985 /// parameters associated with the neural network.
1986 ///
/// The three parameters set by `fann_set_callback`, `fann_set_error_log` and
/// `fann_set_user_data` are *NOT* saved to the file, because they cannot safely be
/// ported to a different location. Temporary parameters generated during training,
/// like the value returned by `fann_get_MSE`, are not saved either.
1991 ///
1992 /// # Return
1993 /// The function returns 0 on success and -1 on failure.
1994 ///
1995 /// # See also
1996 /// `fann_create_from_file`, `fann_save_to_fixed`
1997 ///
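/// # Example
///
/// A sketch of a save/reload round trip (not from the FANN documentation); the file name is
/// hypothetical and `ann` is assumed to be a valid network:
///
/// ```notest
/// use std::ffi::CString;
///
/// let path = CString::new("my_net.net").unwrap();
/// unsafe {
///     if fann_save(ann, path.as_ptr()) == 0 {
///         // An exact copy of the network can later be restored from the file.
///         let copy = fann_create_from_file(path.as_ptr());
///         fann_destroy(copy);
///     }
/// }
/// ```
///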
1998 /// This function appears in FANN >= 1.0.0.
1999 pub fn fann_save(ann: *mut fann, configuration_file: *const c_char) -> c_int;
2000
/// Saves the entire network to a configuration file, but in fixed point format,
/// no matter which format it is currently in.
///
/// This is useful for training a network in floating point
/// and then later executing it in fixed point.
2007 ///
2008 /// The function returns the bit position of the fix point, which
2009 /// can be used to find out how accurate the fixed point network will be.
2010 /// A high value indicates high precision, and a low value indicates low
2011 /// precision.
2012 ///
2013 /// A negative value indicates very low precision, and a very strong possibility for overflow.
2014 /// (the actual fix point will be set to 0, since a negative fix point does not make sense).
2015 ///
2016 /// Generally, a fix point lower than 6 is bad, and should be avoided.
2017 /// The best way to avoid this is to have fewer connections to each neuron,
2018 /// or just fewer neurons in each layer.
2019 ///
2020 /// The fixed point use of this network is only intended for use on machines that
2021 /// have no floating point processor, like an iPAQ. On normal computers the floating
2022 /// point version is actually faster.
2023 ///
2024 /// # See also
2025 /// `fann_create_from_file`, `fann_save`
2026 ///
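/// # Example
///
/// A sketch of checking the returned fix point (not from the FANN documentation); the file name
/// is hypothetical and `ann` is assumed to be a valid network:
///
/// ```notest
/// use std::ffi::CString;
///
/// let path = CString::new("my_net_fixed.net").unwrap();
/// unsafe {
///     let decimal_point = fann_save_to_fixed(ann, path.as_ptr());
///     if decimal_point < 6 {
///         // Low precision: consider using fewer connections or neurons before
///         // relying on the fixed point version of this network.
///     }
/// }
/// ```
///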
2027 /// This function appears in FANN >= 1.0.0.
2028 pub fn fann_save_to_fixed(ann: *mut fann, configuration_file: *const c_char) -> c_int;
2029
2030 /// Creates a standard fully connected backpropagation neural network.
2031 ///
2032 /// There will be a bias neuron in each layer (except the output layer),
2033 /// and this bias neuron will be connected to all neurons in the next layer.
2034 /// When running the network, the bias nodes always emit 1.
2035 ///
2036 /// To destroy a `fann` use the `fann_destroy` function.
2037 ///
2038 /// # Parameters
2039 ///
2040 /// * `num_layers` - The total number of layers including the input and the output layer.
2041 /// * `...` - Integer values determining the number of neurons in each layer starting
2042 /// with the input layer and ending with the output layer.
2043 ///
2044 /// # Returns
2045 ///
2046 /// A pointer to the newly created `fann`.
2047 ///
2048 /// # Example
2049 ///
2050 ///
2051 /// ```
2052 /// // Creating an ANN with 2 input neurons, 1 output neuron,
2053 /// // and two hidden layers with 8 and 9 neurons
2054 /// unsafe {
/// let ann = fann_sys::fann_create_standard(4, 2, 8, 9, 1);
/// // Free the network again when done with it.
/// fann_sys::fann_destroy(ann);
/// }
2057 /// ```
2058 ///
2059 /// This function appears in FANN >= 2.0.0.
2060 pub fn fann_create_standard(num_layers: c_uint, ...) -> *mut fann;
2061
2062 /// Just like `fann_create_standard`, but with an array of layer sizes
2063 /// instead of individual parameters.
2064 ///
2065 /// # Example
2066 ///
2067 /// ```
2068 /// // Creating an ANN with 2 input neurons, 1 output neuron,
2069 /// // and two hidden layers with 8 and 9 neurons
2070 /// let layers = [2, 8, 9, 1];
2071 /// unsafe {
/// let ann = fann_sys::fann_create_standard_array(4, layers.as_ptr());
/// fann_sys::fann_destroy(ann);
/// }
2074 /// ```
2075 ///
2076 /// # See also
2077 /// `fann_create_standard`, `fann_create_sparse`, `fann_create_shortcut`
2078 ///
2079 /// This function appears in FANN >= 2.0.0.
2080 pub fn fann_create_standard_array(num_layers: c_uint, layers: *const c_uint) -> *mut fann;
2081
2082 /// Creates a standard backpropagation neural network, which is not fully connected.
2083 ///
2084 /// # Parameters
2085 ///
2086 /// * `connection_rate` - The connection rate controls how many connections there will be in the
2087 /// network. If the connection rate is set to 1, the network will be fully
2088 /// connected, but if it is set to 0.5, only half of the connections will be set.
2089 /// A connection rate of 1 will yield the same result as `fann_create_standard`.
2090 /// * `num_layers` - The total number of layers including the input and the output layer.
2091 /// * `...` - Integer values determining the number of neurons in each layer
2092 /// starting with the input layer and ending with the output layer.
2093 ///
2094 /// # Returns
2095 /// A pointer to the newly created `fann`.
2096 ///
2097 /// # See also
2098 /// `fann_create_sparse_array`, `fann_create_standard`, `fann_create_shortcut`
2099 ///
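/// # Example
///
/// An illustrative sketch (not from the FANN documentation):
///
/// ```notest
/// unsafe {
///     // 2 input neurons, 8 hidden neurons and 1 output neuron, with only about
///     // half of the possible connections created.
///     let ann = fann_create_sparse(0.5, 3, 2, 8, 1);
///     fann_destroy(ann);
/// }
/// ```
///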
2100 /// This function appears in FANN >= 2.0.0.
2101 pub fn fann_create_sparse(connection_rate: c_float, num_layers: c_uint, ...) -> *mut fann;
2102
2103 /// Just like `fann_create_sparse`, but with an array of layer sizes
2104 /// instead of individual parameters.
2105 ///
2106 /// See `fann_create_standard_array` for a description of the parameters.
2107 ///
2108 /// # See also
2109 /// `fann_create_sparse`, `fann_create_standard`, `fann_create_shortcut`
2110 ///
2111 /// This function appears in FANN >= 2.0.0.
2112 pub fn fann_create_sparse_array(
2113 connection_rate: c_float,
2114 num_layers: c_uint,
2115 layers: *const c_uint,
2116 ) -> *mut fann;
2117
2118 /// Creates a standard backpropagation neural network, which is not fully connected and which
2119 /// also has shortcut connections.
2120 ///
2121 /// Shortcut connections are connections that skip layers. A fully connected network with
2122 /// shortcut connections is a network where all neurons are connected to all neurons in later
/// layers, including direct connections from the input layer to the output layer.
2124 ///
2125 /// See `fann_create_standard` for a description of the parameters.
2126 ///
2127 /// # See also
/// `fann_create_shortcut_array`, `fann_create_standard`, `fann_create_sparse`
2129 ///
2130 /// This function appears in FANN >= 2.0.0.
2131 pub fn fann_create_shortcut(num_layers: c_uint, ...) -> *mut fann;
2132
2133 /// Just like `fann_create_shortcut`, but with an array of layer sizes
2134 /// instead of individual parameters.
2135 ///
2136 /// See `fann_create_standard_array` for a description of the parameters.
2137 ///
2138 /// # See also
2139 /// `fann_create_shortcut`, `fann_create_standard`, `fann_create_sparse`
2140 ///
2141 /// This function appears in FANN >= 2.0.0.
2142 pub fn fann_create_shortcut_array(num_layers: c_uint, layers: *const c_uint) -> *mut fann;
2143
2144 /// Destroys the entire network, properly freeing all the associated memory.
2145 ///
2146 /// This function appears in FANN >= 1.0.0.
2147 pub fn fann_destroy(ann: *mut fann);
2148
2149 /// Runs input through the neural network, returning an array of outputs, the number of
/// which equals the number of neurons in the output layer.
2151 ///
2152 /// Ownership of the output array remains with the `fann` structure. It may be overwritten by
2153 /// subsequent function calls. Do not deallocate it!
2154 ///
2155 /// # See also
2156 /// `fann_test`
2157 ///
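/// # Example
///
/// A sketch of reading the outputs through a slice (not from the FANN documentation), assuming
/// `ann` is a valid network with two inputs:
///
/// ```notest
/// unsafe {
///     let input: [fann_type; 2] = [-1.0, 1.0];
///     let output_ptr = fann_run(ann, input.as_ptr());
///     let num_output = fann_get_num_output(ann) as usize;
///     // The buffer is owned by the `fann` structure; view it, but do not free it.
///     let outputs = std::slice::from_raw_parts(output_ptr, num_output);
///     println!("{:?}", outputs);
/// }
/// ```
///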
2158 /// This function appears in FANN >= 1.0.0.
2159 pub fn fann_run(ann: *mut fann, input: *const fann_type) -> *mut fann_type;
2160
2161 /// Give each connection a random weight between `min_weight` and `max_weight`.
2162 ///
/// Initially, the weights are random values between -0.1 and 0.1.
2164 ///
2165 /// # See also
2166 /// `fann_init_weights`
2167 ///
2168 /// This function appears in FANN >= 1.0.0.
2169 pub fn fann_randomize_weights(ann: *mut fann, min_weight: fann_type, max_weight: fann_type);
2170
/// Initialize the weights using Widrow and Nguyen's algorithm.
2172 ///
2173 /// This function behaves similarly to `fann_randomize_weights`. It will use the algorithm
2174 /// developed by Derrick Nguyen and Bernard Widrow to set the weights in such a way
2175 /// as to speed up training. This technique is not always successful, and in some cases can be
2176 /// less efficient than a purely random initialization.
2177 ///
/// The algorithm requires access to the range of the input data (i.e. the largest and smallest
/// input values), and therefore accepts a second argument, `train_data`, which is the training
/// data that will be used to train the network.
2181 ///
2182 /// # See also
2183 /// `fann_randomize_weights`, `fann_read_train_from_file`
2184 ///
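/// # Example
///
/// A sketch of initializing the weights from a training file before training (not from the FANN
/// documentation); the file name is hypothetical, and the `fann_read_train_from_file`,
/// `fann_train_on_data` and `fann_destroy_train` bindings are assumed to be in scope:
///
/// ```notest
/// use std::ffi::CString;
///
/// let path = CString::new("path/to/train.data").unwrap();
/// unsafe {
///     let data = fann_read_train_from_file(path.as_ptr());
///     fann_init_weights(ann, data);
///     fann_train_on_data(ann, data, 1000, 100, 0.001);
///     fann_destroy_train(data);
/// }
/// ```
///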
2185 /// This function appears in FANN >= 1.1.0.
2186 pub fn fann_init_weights(ann: *mut fann, train_data: *mut fann_train_data);
2187
2188 /// Prints the connections of the ANN in a compact matrix, for easy viewing of the internals
2189 /// of the ANN.
2190 ///
2191 /// The output from `fann_print_connections` on a small (2 2 1) network trained on the xor
2192 /// problem:
2193 ///
2194 /// ```text
2195 /// Layer / Neuron 012345
2196 /// L 1 / N 3 BBa...
2197 /// L 1 / N 4 BBA...
2198 /// L 1 / N 5 ......
2199 /// L 2 / N 6 ...BBA
2200 /// L 2 / N 7 ......
2201 /// ```
2202 ///
2203 /// This network has five real neurons and two bias neurons. This gives a total of seven
2204 /// neurons named from 0 to 6. The connections between these neurons can be seen in the matrix.
2205 /// "." is a place where there is no connection, while a character tells how strong the
2206 /// connection is on a scale from a-z. The two real neurons in the hidden layer (neuron 3 and 4
2207 /// in layer 1) have connections from the three neurons in the previous layer as is visible in
2208 /// the first two lines. The output neuron 6 has connections from the three neurons in the
2209 /// hidden layer 3 - 5 as is visible in the fourth line.
2210 ///
/// To simplify the matrix, output neurons are not visible as neurons that connections can come
2212 /// from, and input and bias neurons are not visible as neurons that connections can go to.
2213 ///
2214 /// This function appears in FANN >= 1.2.0.
2215 pub fn fann_print_connections(ann: *mut fann);
2216
2217 /// Prints all of the parameters and options of the ANN.
2218 ///
2219 /// This function appears in FANN >= 1.2.0.
2220 pub fn fann_print_parameters(ann: *mut fann);
2221
2222 /// Get the number of input neurons.
2223 ///
2224 /// This function appears in FANN >= 1.0.0.
2225 pub fn fann_get_num_input(ann: *const fann) -> c_uint;
2226
2227 /// Get the number of output neurons.
2228 ///
2229 /// This function appears in FANN >= 1.0.0.
2230 pub fn fann_get_num_output(ann: *const fann) -> c_uint;
2231
/// Get the total number of neurons in the entire network. This number also includes the
/// bias neurons, so a 2-4-2 network has 2 + 4 + 2 + 2 (bias) = 10 neurons.
2234 ///
2235 /// This function appears in FANN >= 1.0.0.
2236 pub fn fann_get_total_neurons(ann: *const fann) -> c_uint;
2237
2238 /// Get the total number of connections in the entire network.
2239 ///
2240 /// This function appears in FANN >= 1.0.0.
2241 pub fn fann_get_total_connections(ann: *const fann) -> c_uint;
2242
2243 /// Get the type of neural network it was created as.
2244 ///
2245 /// # Parameters
2246 ///
2247 /// * `ann` - A previously created neural network structure of type `fann` pointer.
2248 ///
2249 /// # Returns
2250 /// The neural network type from enum `fann_network_type_enum`
2251 ///
2252 /// # See also
2253 /// `fann_network_type_enum`
2254 ///
/// This function appears in FANN >= 2.1.0.
2256 pub fn fann_get_network_type(ann: *const fann) -> fann_nettype_enum;
2257
2258 /// Get the connection rate used when the network was created.
2259 ///
2260 /// # Parameters
2261 ///
2262 /// * `ann` - A previously created neural network structure of type `fann` pointer.
2263 ///
2264 /// # Returns
2265 /// The connection rate
2266 ///
/// This function appears in FANN >= 2.1.0.
2268 pub fn fann_get_connection_rate(ann: *const fann) -> c_float;
2269
2270 /// Get the number of layers in the network.
2271 ///
2272 /// # Parameters
2273 ///
2274 /// * `ann` - A previously created neural network structure of type `fann` pointer.
2275 ///
2276 /// # Returns
2277 ///
2278 /// The number of layers in the neural network
2279 ///
2280 /// # Example
2281 ///
2282 /// ```
2283 /// // Obtain the number of layers in a neural network
2284 /// unsafe {
2285 /// let ann = fann_sys::fann_create_standard(4, 2, 8, 9, 1);
2286 /// assert_eq!(4, fann_sys::fann_get_num_layers(ann));
2287 /// }
2288 /// ```
2289 ///
/// This function appears in FANN >= 2.1.0.
2291 pub fn fann_get_num_layers(ann: *const fann) -> c_uint;
2292
2293 /// Get the number of neurons in each layer in the network.
2294 ///
/// Bias neurons are not included, so the layer sizes match the `fann_create` functions.
2296 ///
2297 /// # Parameters
2298 ///
2299 /// * `ann` - A previously created neural network structure of type `fann` pointer.
2300 ///
/// The layers array must be preallocated to accommodate at least `fann_get_num_layers` items.
2302 ///
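/// # Example
///
/// A sketch of allocating the buffer on the Rust side (not from the FANN documentation),
/// assuming `ann` is a valid network:
///
/// ```notest
/// unsafe {
///     let num_layers = fann_get_num_layers(ann) as usize;
///     let mut layers: Vec<c_uint> = vec![0; num_layers];
///     fann_get_layer_array(ann, layers.as_mut_ptr());
///     // `layers` now holds the number of neurons in each layer, excluding bias neurons.
/// }
/// ```
///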
2303 /// This function appears in FANN >= 2.1.0.
2304 pub fn fann_get_layer_array(ann: *const fann, layers: *mut c_uint);
2305
/// Get the number of bias neurons in each layer in the network.
2307 ///
2308 /// # Parameters
2309 ///
2310 /// * `ann` - A previously created neural network structure of type `fann` pointer.
2311 ///
/// The bias array must be preallocated to accommodate at least `fann_get_num_layers` items.
2313 ///
2314 /// This function appears in FANN >= 2.1.0.
2315 pub fn fann_get_bias_array(ann: *const fann, bias: *mut c_uint);
2316
2317 /// Get the connections in the network.
2318 ///
2319 /// # Parameters
2320 ///
2321 /// * `ann` - A previously created neural network structure of type `fann` pointer.
2322 ///
2323 /// The connections array must be preallocated to accommodate at least
2324 /// `fann_get_total_connections` items.
2325 ///
2326 /// This function appears in FANN >= 2.1.0.
2327 pub fn fann_get_connection_array(ann: *const fann, connections: *mut fann_connection);
2328
2329 /// Set connections in the network.
2330 ///
2331 /// # Parameters
2332 ///
2333 /// * `ann` - A previously created neural network structure of type `fann` pointer.
2334 ///
/// Only the weights can be changed; connections and weights are ignored
2336 /// if they do not already exist in the network.
2337 ///
2338 /// The array must accommodate `num_connections` items.
2339 ///
2340 /// This function appears in FANN >= 2.1.0.
2341 pub fn fann_set_weight_array(
2342 ann: *mut fann,
2343 connections: *mut fann_connection,
2344 num_connections: c_uint,
2345 );
2346
2347 /// Set a connection in the network.
2348 ///
2349 /// # Parameters
2350 ///
2351 /// * `ann` - A previously created neural network structure of type `fann` pointer.
2352 ///
2353 /// Only the weights can be changed. The connection/weight is
2354 /// ignored if it does not already exist in the network.
2355 ///
2356 /// This function appears in FANN >= 2.1.0.
2357 pub fn fann_set_weight(
2358 ann: *mut fann,
2359 from_neuron: c_uint,
2360 to_neuron: c_uint,
2361 weight: fann_type,
2362 );
2363
2364 /// Store a pointer to user defined data. The pointer can be retrieved with `fann_get_user_data`
2365 /// for example in a callback. It is the user's responsibility to allocate and deallocate any
2366 /// data that the pointer might point to.
2367 ///
2368 /// # Parameters
2369 ///
2370 /// * `ann` - A previously created neural network structure of type `fann` pointer.
2371 /// * `user_data` - A void pointer to user defined data.
2372 ///
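/// # Example
///
/// A sketch of one possible ownership pattern (not from the FANN documentation), assuming `ann`
/// is a valid network:
///
/// ```notest
/// unsafe {
///     // Hand a boxed value to the network as an opaque pointer.
///     fann_set_user_data(ann, Box::into_raw(Box::new(0u32)) as *mut c_void);
///
///     // Retrieve and update it later, for example from a training callback.
///     let counter = fann_get_user_data(ann) as *mut u32;
///     *counter += 1;
///
///     // Before destroying the network, reclaim the box so the data is freed.
///     drop(Box::from_raw(fann_get_user_data(ann) as *mut u32));
/// }
/// ```
///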
2373 /// This function appears in FANN >= 2.1.0.
2374 pub fn fann_set_user_data(ann: *mut fann, user_data: *mut c_void);
2375
2376 /// Get a pointer to user defined data that was previously set with `fann_set_user_data`. It is
2377 /// the user's responsibility to allocate and deallocate any data that the pointer might point
2378 /// to.
2379 ///
2380 /// # Parameters
2381 ///
2382 /// * `ann` - A previously created neural network structure of type `fann` pointer.
2383 ///
2384 /// # Returns
2385 /// A void pointer to user defined data.
2386 ///
2387 /// This function appears in FANN >= 2.1.0.
2388 pub fn fann_get_user_data(ann: *mut fann) -> *mut c_void;
2389}
2390
2391#[cfg(test)]
2392mod tests {
2393 use super::*;
2394 use std::ffi::CString;
2395 use std::fs::remove_file;
2396 use std::str::from_utf8;
2397
2398 const EPSILON: fann_type = 0.2;
2399
2400 #[test]
2401 fn test_tutorial_example() {
2402 let c_trainfile = CString::new(&b"test_files/xor.data"[..]).unwrap();
2403 let p_trainfile = c_trainfile.as_ptr();
2404 let c_savefile = CString::new(&b"test_files/xor.net"[..]).unwrap();
2405 let p_savefile = c_savefile.as_ptr();
2406 // Train an ANN with a data set and then save the ANN to a file.
2407 let num_input = 2;
2408 let num_output = 1;
2409 let num_layers = 3;
2410 let num_neurons_hidden = 3;
2411 let desired_error = 0.001;
2412 let max_epochs = 500_000;
2413 let epochs_between_reports = 1000;
2414 unsafe {
2415 let ann = fann_create_standard(num_layers, num_input, num_neurons_hidden, num_output);
2416 fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
2417 fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC);
2418 fann_train_on_file(
2419 ann,
2420 p_trainfile,
2421 max_epochs,
2422 epochs_between_reports,
2423 desired_error,
2424 );
2425 fann_save(ann, p_savefile);
2426 fann_destroy(ann);
2427 }
2428 // Load the ANN and execute input.
2429 unsafe {
2430 let ann = fann_create_from_file(p_savefile);
2431 assert!(EPSILON > (1.0 - *fann_run(ann, [-1.0, 1.0].as_ptr())).abs());
2432 assert!(EPSILON > (1.0 - *fann_run(ann, [1.0, -1.0].as_ptr())).abs());
2433 assert!(EPSILON > (-1.0 - *fann_run(ann, [1.0, 1.0].as_ptr())).abs());
2434 assert!(EPSILON > (-1.0 - *fann_run(ann, [-1.0, -1.0].as_ptr())).abs());
2435 fann_destroy(ann);
2436 }
2437 // Delete the ANN file created by the test.
2438 remove_file(from_utf8(c_savefile.to_bytes()).unwrap()).unwrap();
2439 }
2440}