Trait opencv::hdf::HDF5

pub trait HDF5 {
pub fn as_raw_HDF5(&self) -> *const c_void;
pub fn as_raw_mut_HDF5(&mut self) -> *mut c_void;
pub fn close(&mut self) -> Result<()> { ... }
pub fn grcreate(&mut self, grlabel: &str) -> Result<()> { ... }
pub fn hlexists(&self, label: &str) -> Result<bool> { ... }
pub fn atexists(&self, atlabel: &str) -> Result<bool> { ... }
pub fn atdelete(&mut self, atlabel: &str) -> Result<()> { ... }
pub fn atwrite(&mut self, value: i32, atlabel: &str) -> Result<()> { ... }
pub fn atread(&mut self, value: &mut i32, atlabel: &str) -> Result<()> { ... }
pub fn atwrite_1(&mut self, value: f64, atlabel: &str) -> Result<()> { ... }
pub fn atread_1(&mut self, value: &mut f64, atlabel: &str) -> Result<()> { ... }
pub fn atwrite_2(&mut self, value: &str, atlabel: &str) -> Result<()> { ... }
pub fn atread_2(&mut self, value: &mut String, atlabel: &str) -> Result<()> { ... }
pub fn atwrite_3(
        &mut self,
        value: &dyn ToInputArray,
        atlabel: &str
    ) -> Result<()> { ... }
pub fn atread_3(
        &mut self,
        value: &mut dyn ToOutputArray,
        atlabel: &str
    ) -> Result<()> { ... }
pub fn dscreate(
        &self,
        rows: i32,
        cols: i32,
        typ: i32,
        dslabel: &str
    ) -> Result<()> { ... }
pub fn dscreate_1(
        &self,
        rows: i32,
        cols: i32,
        typ: i32,
        dslabel: &str,
        compresslevel: i32
    ) -> Result<()> { ... }
pub fn dscreate_2(
        &self,
        rows: i32,
        cols: i32,
        typ: i32,
        dslabel: &str,
        compresslevel: i32,
        dims_chunks: &Vector<i32>
    ) -> Result<()> { ... }
pub fn dscreate_3(
        &self,
        rows: i32,
        cols: i32,
        typ: i32,
        dslabel: &str,
        compresslevel: i32,
        dims_chunks: &i32
    ) -> Result<()> { ... }
pub fn dscreate_4(
        &self,
        n_dims: i32,
        sizes: &i32,
        typ: i32,
        dslabel: &str
    ) -> Result<()> { ... }
pub fn dscreate_5(
        &self,
        n_dims: i32,
        sizes: &i32,
        typ: i32,
        dslabel: &str,
        compresslevel: i32
    ) -> Result<()> { ... }
pub fn dscreate_6(
        &self,
        sizes: &Vector<i32>,
        typ: i32,
        dslabel: &str,
        compresslevel: i32,
        dims_chunks: &Vector<i32>
    ) -> Result<()> { ... }
pub fn dscreate_7(
        &self,
        n_dims: i32,
        sizes: &i32,
        typ: i32,
        dslabel: &str,
        compresslevel: i32,
        dims_chunks: &i32
    ) -> Result<()> { ... }
pub fn dsgetsize(
        &self,
        dslabel: &str,
        dims_flag: i32
    ) -> Result<Vector<i32>> { ... }
pub fn dsgettype(&self, dslabel: &str) -> Result<i32> { ... }
pub fn dswrite(&self, array: &dyn ToInputArray, dslabel: &str) -> Result<()> { ... }
pub fn dswrite_1(
        &self,
        array: &dyn ToInputArray,
        dslabel: &str,
        dims_offset: &i32
    ) -> Result<()> { ... }
pub fn dswrite_2(
        &self,
        array: &dyn ToInputArray,
        dslabel: &str,
        dims_offset: &Vector<i32>,
        dims_counts: &Vector<i32>
    ) -> Result<()> { ... }
pub fn dswrite_3(
        &self,
        array: &dyn ToInputArray,
        dslabel: &str,
        dims_offset: &i32,
        dims_counts: &i32
    ) -> Result<()> { ... }
pub fn dsinsert(
        &self,
        array: &dyn ToInputArray,
        dslabel: &str
    ) -> Result<()> { ... }
pub fn dsinsert_1(
        &self,
        array: &dyn ToInputArray,
        dslabel: &str,
        dims_offset: &i32
    ) -> Result<()> { ... }
pub fn dsinsert_2(
        &self,
        array: &dyn ToInputArray,
        dslabel: &str,
        dims_offset: &Vector<i32>,
        dims_counts: &Vector<i32>
    ) -> Result<()> { ... }
pub fn dsinsert_3(
        &self,
        array: &dyn ToInputArray,
        dslabel: &str,
        dims_offset: &i32,
        dims_counts: &i32
    ) -> Result<()> { ... }
pub fn dsread(
        &self,
        array: &mut dyn ToOutputArray,
        dslabel: &str
    ) -> Result<()> { ... }
pub fn dsread_1(
        &self,
        array: &mut dyn ToOutputArray,
        dslabel: &str,
        dims_offset: &i32
    ) -> Result<()> { ... }
pub fn dsread_2(
        &self,
        array: &mut dyn ToOutputArray,
        dslabel: &str,
        dims_offset: &Vector<i32>,
        dims_counts: &Vector<i32>
    ) -> Result<()> { ... }
pub fn dsread_3(
        &self,
        array: &mut dyn ToOutputArray,
        dslabel: &str,
        dims_offset: &i32,
        dims_counts: &i32
    ) -> Result<()> { ... }
pub fn kpgetsize(&self, kplabel: &str, dims_flag: i32) -> Result<i32> { ... }
pub fn kpcreate(
        &self,
        size: i32,
        kplabel: &str,
        compresslevel: i32,
        chunks: i32
    ) -> Result<()> { ... }
pub fn kpwrite(
        &self,
        keypoints: Vector<KeyPoint>,
        kplabel: &str,
        offset: i32,
        counts: i32
    ) -> Result<()> { ... }
pub fn kpinsert(
        &self,
        keypoints: Vector<KeyPoint>,
        kplabel: &str,
        offset: i32,
        counts: i32
    ) -> Result<()> { ... }
pub fn kpread(
        &self,
        keypoints: &mut Vector<KeyPoint>,
        kplabel: &str,
        offset: i32,
        counts: i32
    ) -> Result<()> { ... }
}

Hierarchical Data Format version 5 interface.

Note that this module is compiled only when the HDF5 library is correctly installed.

Required methods

pub fn as_raw_HDF5(&self) -> *const c_void[src]

pub fn as_raw_mut_HDF5(&mut self) -> *mut c_void[src]


Provided methods

pub fn close(&mut self) -> Result<()>[src]

Close and release hdf5 object.

pub fn grcreate(&mut self, grlabel: &str) -> Result<()>[src]

Create a group.

Parameters

  • grlabel: specify the hdf5 group label.

Creates an hdf5 group with default properties. The group is closed automatically after creation.

Note: Groups are useful for organising multiple datasets. It is possible to create subgroups within any group. The existence of a particular group can be checked using hlexists(). For subgroups, the label takes the form 'Group1/SubGroup1', where SubGroup1 is within the root group Group1. Before creating a subgroup, its parent group MUST be created.

  • In this example, Group1 will have one subgroup called SubGroup1 (see the Rust sketch after the note below).

[The original documentation illustrates this with the create_group snippet and a screenshot of the resulting groups in the HDFView tool.]

Note: When a dataset is created with dscreate() or kpcreate(), it can be created within a group by specifying the full path within the label. In our example, it would be: 'Group1/SubGroup1/MyDataSet'. It is not thread safe.
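
  • A minimal Rust sketch of the example above, assuming hdf::open() returns a smart pointer implementing this trait (as the PtrOfHDF5 implementor below suggests):
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      // open / autocreate the hdf5 file
      let mut h5 = hdf::open("mytest.h5")?;
      // the parent group MUST exist before its subgroup is created
      if !h5.hlexists("Group1")? {
          h5.grcreate("Group1")?;
      }
      if !h5.hlexists("Group1/SubGroup1")? {
          h5.grcreate("Group1/SubGroup1")?;
      }
      // close and release the hdf5 object
      h5.close()?;
      Ok(())
  }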

pub fn hlexists(&self, label: &str) -> Result<bool>[src]

Check if label exists or not.

Parameters

  • label: specify the hdf5 dataset label.

Returns true if the dataset exists, and false otherwise.

Note: Checks whether a dataset, group, or other object type (hdf5 link) exists under the given label. It is thread safe.

pub fn atexists(&self, atlabel: &str) -> Result<bool>[src]

Check whether a given attribute exists or not in the root group.

Parameters

  • atlabel: the attribute name to be checked.

Returns

true if the attribute exists, false otherwise.

See also

atdelete, atwrite, atread

pub fn atdelete(&mut self, atlabel: &str) -> Result<()>[src]

Delete an attribute from the root group.

Parameters

  • atlabel: the attribute to be deleted.

Note: CV_Error() is called if the given attribute does not exist. Use atexists() to check whether it exists or not beforehand.

See also

atexists, atwrite, atread

pub fn atwrite(&mut self, value: i32, atlabel: &str) -> Result<()>[src]

Write an attribute inside the root group.

Parameters

  • value: attribute value.
  • atlabel: attribute name.

[The original C++ documentation demonstrates writing an attribute of type cv::String in the snippets_write_str snippet.]

Note: CV_Error() is called if the given attribute already exists. Use atexists() to check whether it exists beforehand, and atdelete() to delete it if it does.

See also

atexists, atdelete, atread

pub fn atread(&mut self, value: &mut i32, atlabel: &str) -> Result<()>[src]

Read an attribute from the root group.

Parameters

  • value: address where the attribute is read into
  • atlabel: attribute name

[The original C++ documentation demonstrates reading an attribute of type cv::String in the snippets_read_str snippet.]

Note: The attribute MUST exist, otherwise CV_Error() is called. Use atexists() to check if it exists beforehand.
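
  • A minimal Rust sketch of an i32 attribute round-trip through the root group, following the notes above (hdf::open() is assumed to return a handle implementing this trait):
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      let mut h5 = hdf::open("mytest.h5")?;
      // atwrite() fails on an existing attribute, so delete it first
      if h5.atexists("my_attr")? {
          h5.atdelete("my_attr")?;
      }
      h5.atwrite(128, "my_attr")?;
      let mut value = 0i32;
      h5.atread(&mut value, "my_attr")?;
      assert_eq!(value, 128);
      h5.close()?;
      Ok(())
  }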

See also

atexists, atdelete, atwrite

pub fn atwrite_1(&mut self, value: f64, atlabel: &str) -> Result<()>[src]

Write an attribute into the root group.

Parameters

  • value: attribute value.
  • atlabel: attribute name.

Note: CV_Error() is called if the given attribute already exists. Use atexists() to check whether it exists beforehand, and atdelete() to delete it if it does.

See also

atexists, atdelete, atread.

Overloaded parameters

pub fn atread_1(&mut self, value: &mut f64, atlabel: &str) -> Result<()>[src]

Read an attribute from the root group.

Parameters

  • value: attribute value.
  • atlabel: attribute name.

Note: The attribute MUST exist, otherwise CV_Error() is called. Use atexists() to check if it exists beforehand.

See also

atexists, atdelete, atwrite

Overloaded parameters

pub fn atwrite_2(&mut self, value: &str, atlabel: &str) -> Result<()>[src]

Write an attribute into the root group.

Parameters

  • value: attribute value.
  • atlabel: attribute name.

Note: CV_Error() is called if the given attribute already exists. Use atexists() to check whether it exists beforehand, and atdelete() to delete it if it does.

See also

atexists, atdelete, atread.

Overloaded parameters

pub fn atread_2(&mut self, value: &mut String, atlabel: &str) -> Result<()>[src]

Read an attribute from the root group.

Parameters

  • value: attribute value.
  • atlabel: attribute name.

Note: The attribute MUST exist, otherwise CV_Error() is called. Use atexists() to check if it exists beforehand.

See also

atexists, atdelete, atwrite

Overloaded parameters

pub fn atwrite_3(
    &mut self,
    value: &dyn ToInputArray,
    atlabel: &str
) -> Result<()>
[src]

Write an attribute into the root group.

Parameters

  • value: attribute value. Currently, only n-d continuous multi-channel arrays are supported.
  • atlabel: attribute name.

Note: CV_Error() is called if the given attribute already exists. Use atexists() to check whether it exists beforehand, and atdelete() to delete it if it does.

See also

atexists, atdelete, atread.

pub fn atread_3(
    &mut self,
    value: &mut dyn ToOutputArray,
    atlabel: &str
) -> Result<()>
[src]

Read an attribute from the root group.

Parameters

  • value: attribute value. Currently, only n-d continuous multi-channel arrays are supported.
  • atlabel: attribute name.

Note: The attribute MUST exist, otherwise CV_Error() is called. Use atexists() to check if it exists beforehand.
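
  • A minimal Rust sketch storing a small Mat as a root-group attribute via atwrite_3()/atread_3(); the Mat constructor name is an assumption based on these bindings:
  use opencv::core::{self, Mat, Scalar};
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      let mut h5 = hdf::open("mytest.h5")?;
      if h5.atexists("mat_attr")? {
          h5.atdelete("mat_attr")?;
      }
      // a small continuous CV_64F matrix used as the attribute value
      let m = Mat::new_rows_cols_with_default(2, 3, core::CV_64F, Scalar::all(1.0))?;
      h5.atwrite_3(&m, "mat_attr")?;
      // placeholder container; atread_3() reallocates it to fit the attribute
      let mut out = Mat::new_rows_cols_with_default(1, 1, core::CV_64F, Scalar::all(0.0))?;
      h5.atread_3(&mut out, "mat_attr")?;
      h5.close()?;
      Ok(())
  }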

See also

atexists, atdelete, atwrite

pub fn dscreate(
    &self,
    rows: i32,
    cols: i32,
    typ: i32,
    dslabel: &str
) -> Result<()>
[src]

Create and allocate storage for n-dimensional dataset, single or multichannel type.

Parameters

  • n_dims: declare number of dimensions
  • sizes: array containing sizes for each dimensions
  • type: type to be used, e.g., CV_8UC3, CV_32FC1, etc.
  • dslabel: specify the hdf5 dataset label. Existing dataset label will cause an error.
  • compresslevel: specify the compression level 0-9 to be used. H5_NONE is the default value and means no compression; the value 0 also means no compression. A value of 9 indicates the best compression ratio. Note that a higher compression level implies a higher computational cost. Compression relies on GNU gzip.
  • dims_chunks: each array member specifies the chunking size to be used for block I/O; the default NULL means no chunking.

Note: If the dataset already exists, an exception will be thrown. Existence of the dataset can be checked using hlexists().

  • See example below that creates a 6 dimensional storage space:
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // create space for 6 dimensional CV_64FC2 matrix
  if ( ! h5io->hlexists( "nddata" ) )
  {
   const int n_dims = 6;
   int dsdims[n_dims] = { 100, 100, 20, 10, 5, 5 };
   h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata" );
  }
  else
   printf( "DS already created, skipping\n" );
  // release
  h5io->close();

Note: Activating compression requires internal chunking. Chunking can significantly improve access speed at both read and write time, especially for windowed access logic that shifts the offset inside the dataset. If no custom chunking is specified, the default is a single big chunk the size of the whole dataset.

  • See example of level 0 compression (shallow) using chunking against the first dimension, thus storage will consist of 100 chunks of data:
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // create space for 6 dimensional CV_64FC2 matrix
  if ( ! h5io->hlexists( "nddata" ) )
  {
   const int n_dims = 6;
   int dsdims[n_dims] = { 100, 100, 20, 10, 5, 5 };
   int chunks[n_dims] = {   1, 100, 20, 10, 5, 5 };
   h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata", 0, chunks );
  }
  else
   printf( "DS already created, skipping\n" );
  // release
  h5io->close();

Note: A value of H5_UNLIMITED inside the sizes array means unlimited data on that dimension, so such a dataset can be expanded at any time in those directions. Presence of H5_UNLIMITED on any dimension requires defining custom chunking. No default chunking will be defined in the unlimited scenario, since the default size on that dimension will be zero and will grow once the dataset is written. Writing into a dataset that has H5_UNLIMITED on some of its dimensions requires dsinsert(), which allows growth on unlimited dimensions, instead of dswrite(), which can write only within the predefined data space.

  • Example below shows a 3 dimensional dataset using no compression with all unlimited sizes and one unit chunking:
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  const int n_dims = 3;
  int chunks[n_dims] = { 1, 1, 1 };
  int dsdims[n_dims] = { cv::hdf::HDF5::H5_UNLIMITED, cv::hdf::HDF5::H5_UNLIMITED, cv::hdf::HDF5::H5_UNLIMITED };
  h5io->dscreate( n_dims, dsdims, CV_64FC2, "nddata", cv::hdf::HDF5::H5_NONE, chunks );
  // release
  h5io->close();

Overloaded parameters

pub fn dscreate_1(
    &self,
    rows: i32,
    cols: i32,
    typ: i32,
    dslabel: &str,
    compresslevel: i32
) -> Result<()>
[src]

Create and allocate storage for n-dimensional dataset, single or multichannel type.

This overload additionally takes a compresslevel argument. See dscreate above for the shared parameter descriptions, notes, and examples.

Overloaded parameters

pub fn dscreate_2(
    &self,
    rows: i32,
    cols: i32,
    typ: i32,
    dslabel: &str,
    compresslevel: i32,
    dims_chunks: &Vector<i32>
) -> Result<()>
[src]

Create and allocate storage for n-dimensional dataset, single or multichannel type.

This overload additionally takes compresslevel and dims_chunks arguments. See dscreate above for the shared parameter descriptions, notes, and examples.

Overloaded parameters

pub fn dscreate_3(
    &self,
    rows: i32,
    cols: i32,
    typ: i32,
    dslabel: &str,
    compresslevel: i32,
    dims_chunks: &i32
) -> Result<()>
[src]

Create and allocate storage for two dimensional single or multi channel dataset.

Parameters

  • rows: declare amount of rows
  • cols: declare amount of columns
  • type: type to be used, e.g. CV_8UC3, CV_32FC1, etc.
  • dslabel: specify the hdf5 dataset label. Existing dataset label will cause an error.
  • compresslevel: specify the compression level 0-9 to be used. H5_NONE is the default value and means no compression; the value 0 also means no compression. A value of 9 indicates the best compression ratio. Note that a higher compression level implies a higher computational cost. Compression relies on GNU gzip.
  • dims_chunks: each array member specifies the chunking size to be used for block I/O; the default NULL means no chunking.

Note: If the dataset already exists, an exception will be thrown (CV_Error() is called).

  • Existence of the dataset can be checked using hlexists(), see in this example:
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // create space for 100x50 CV_64FC2 matrix
  if ( ! h5io->hlexists( "hilbert" ) )
   h5io->dscreate( 100, 50, CV_64FC2, "hilbert" );
  else
   printf("DS already created, skipping\n" );
  // release
  h5io->close();

Note: Activating compression requires internal chunking. Chunking can significantly improve access speed both at read and write time, especially for windowed access logic that shifts offset inside dataset. If no custom chunking is specified, the default one will be invoked by the size of the whole dataset as a single big chunk of data.

  • See example of level 9 compression using internal default chunking:
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // create level 9 compressed space for CV_64FC2 matrix
  if ( ! h5io->hlexists( "hilbert" ) )
   h5io->dscreate( 100, 50, CV_64FC2, "hilbert", 9 );
  else
   printf("DS already created, skipping\n" );
  // release
  h5io->close();

Note: A value of H5_UNLIMITED for rows or cols or both means unlimited data on the specified dimension, thus it is possible to expand such a dataset at any time in the row, col or both directions. Presence of H5_UNLIMITED on any dimension requires defining custom chunking. No default chunking will be defined in the unlimited scenario, since the default size on that dimension will be zero and will grow once the dataset is written. Writing into a dataset that has H5_UNLIMITED on some of its dimensions requires dsinsert(), which allows growth on unlimited dimensions, instead of dswrite(), which can write only within the predefined data space.

  • Example below shows no compression but unlimited dimension on cols using 100x100 internal chunking:
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // create uncompressed space with unlimited cols for CV_64FC2 matrix
  int chunks[2] = { 100, 100 };
  h5io->dscreate( 100, cv::hdf::HDF5::H5_UNLIMITED, CV_64FC2, "hilbert", cv::hdf::HDF5::H5_NONE, chunks );
  // release
  h5io->close();

Note: It is not thread safe; it must be called only once per dataset, otherwise an exception will occur. Multiple datasets inside a single hdf5 file are allowed.
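
  • A minimal Rust sketch of the plain 2D creation above; in these bindings the compressed/chunked variants are exposed as the numbered overloads (dscreate_1() adds compresslevel, dscreate_2() adds dims_chunks), and CV_64FC2 is assumed to be exposed as opencv::core::CV_64FC2:
  use opencv::core;
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      let mut h5 = hdf::open("mytest.h5")?;
      // create space for a 100x50 CV_64FC2 matrix unless it already exists
      if !h5.hlexists("hilbert")? {
          h5.dscreate(100, 50, core::CV_64FC2, "hilbert")?;
      } else {
          println!("DS already created, skipping");
      }
      h5.close()?;
      Ok(())
  }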

pub fn dscreate_4(
    &self,
    n_dims: i32,
    sizes: &i32,
    typ: i32,
    dslabel: &str
) -> Result<()>
[src]

pub fn dscreate_5(
    &self,
    n_dims: i32,
    sizes: &i32,
    typ: i32,
    dslabel: &str,
    compresslevel: i32
) -> Result<()>
[src]

pub fn dscreate_6(
    &self,
    sizes: &Vector<i32>,
    typ: i32,
    dslabel: &str,
    compresslevel: i32,
    dims_chunks: &Vector<i32>
) -> Result<()>
[src]

C++ default parameters

  • compresslevel: HDF5::H5_NONE
  • dims_chunks: vector()

pub fn dscreate_7(
    &self,
    n_dims: i32,
    sizes: &i32,
    typ: i32,
    dslabel: &str,
    compresslevel: i32,
    dims_chunks: &i32
) -> Result<()>
[src]

Create and allocate storage for n-dimensional dataset, single or multichannel type.

This is the fully general n-dimensional variant, taking n_dims and sizes together with compresslevel and dims_chunks. See dscreate above for the shared parameter descriptions, notes, and examples.

pub fn dsgetsize(&self, dslabel: &str, dims_flag: i32) -> Result<Vector<i32>>[src]

Fetch dataset sizes

Parameters

  • dslabel: specify the hdf5 dataset label to be measured.
  • dims_flag: will fetch dataset dimensions on H5_GETDIMS, dataset maximum dimensions on H5_GETMAXDIMS, and chunk sizes on H5_GETCHUNKDIMS.

Returns a vector object containing the size of the dataset in each dimension.

Note: The resulting vector size will match the number of dataset dimensions. By default, H5_GETDIMS returns the actual dataset dimensions. The H5_GETMAXDIMS flag returns the maximum allowed dimensions, which normally match the actual dimensions but can hold H5_UNLIMITED if the dataset was prepared in unlimited mode on some of its dimensions. It can be useful to check existing dataset dimensions before overwriting it as a whole or in a subset. Trying to write oversized source data into the target dataset will throw an exception. H5_GETCHUNKDIMS returns the chunk dimensions if the dataset was created with chunking options; otherwise the returned vector size will be zero.

C++ default parameters

  • dims_flag: HDF5::H5_GETDIMS
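
  • A minimal Rust sketch querying dataset sizes; the H5_GETDIMS constant is assumed to be exposed as hdf::HDF5_H5_GETDIMS in these bindings:
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      let mut h5 = hdf::open("mytest.h5")?;
      let dims = h5.dsgetsize("hilbert", hdf::HDF5_H5_GETDIMS)?;
      for (i, size) in dims.iter().enumerate() {
          println!("dimension {}: {}", i, size);
      }
      h5.close()?;
      Ok(())
  }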

pub fn dsgettype(&self, dslabel: &str) -> Result<i32>[src]

Fetch dataset type

Parameters

  • dslabel: specify the hdf5 dataset label to be checked.

Returns the stored matrix type. This is an identifier compatible with the CvMat type system, like e.g. CV_16SC5 (16-bit signed 5-channel array), and so on.

Note: The result can be parsed with CV_MAT_CN() to obtain the number of channels and CV_MAT_DEPTH() to obtain the native data type. It is thread safe.
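
  • A short Rust sketch decoding the returned type flags by hand, using OpenCV's standard type layout (the low 3 bits hold the depth, the channel count minus one is shifted left by 3); the bindings may also expose CV_MAT_DEPTH/CV_MAT_CN helpers:
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      let mut h5 = hdf::open("mytest.h5")?;
      let typ = h5.dsgettype("hilbert")?;
      let depth = typ & 7;           // equivalent of CV_MAT_DEPTH(typ)
      let channels = (typ >> 3) + 1; // equivalent of CV_MAT_CN(typ) for a plain type constant
      println!("depth={}, channels={}", depth, channels);
      h5.close()?;
      Ok(())
  }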

pub fn dswrite(&self, array: &dyn ToInputArray, dslabel: &str) -> Result<()>[src]

pub fn dswrite_1(
    &self,
    array: &dyn ToInputArray,
    dslabel: &str,
    dims_offset: &i32
) -> Result<()>
[src]

pub fn dswrite_2(
    &self,
    array: &dyn ToInputArray,
    dslabel: &str,
    dims_offset: &Vector<i32>,
    dims_counts: &Vector<i32>
) -> Result<()>
[src]

C++ default parameters

  • dims_counts: vector()

pub fn dswrite_3(
    &self,
    array: &dyn ToInputArray,
    dslabel: &str,
    dims_offset: &i32,
    dims_counts: &i32
) -> Result<()>
[src]

Write or overwrite a Mat object into the specified dataset of an hdf5 file.

Parameters

  • array: specify the Mat data array to be written.
  • dslabel: specify the target hdf5 dataset label.
  • dims_offset: each array member specifies the offset along the corresponding dataset dimension from where the InputArray will be (over)written into the dataset.
  • dims_counts: each array member specifies the amount of data along the corresponding dataset dimension from the InputArray that will be written into the dataset.

Writes a Mat object into the targeted dataset.

Note: If the dataset is not created and does not exist it will be created automatically. Only Mat is supported and it must be continuous. It is thread safe, but it is recommended that writes happen over separate non-overlapping regions. Multiple datasets can be written inside a single hdf5 file.

  • The example below writes a 100x100 CV_64FC2 matrix into a dataset. No dataset pre-creation is required. If the routine is called multiple times, the dataset will just be overwritten:
  // dual channel hilbert matrix
  cv::Mat H(100, 100, CV_64FC2);
  for(int i = 0; i < H.rows; i++)
   for(int j = 0; j < H.cols; j++)
   {
       H.at<cv::Vec2d>(i,j)[0] =  1./(i+j+1);
       H.at<cv::Vec2d>(i,j)[1] = -1./(i+j+1);
   }
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // write / overwrite dataset
  h5io->dswrite( H, "hilbert" );
  // release
  h5io->close();
  • The example below writes a smaller 50x100 matrix into a 100x100 compressed space optimised by two 50x100 chunks. The matrix is written twice, into the first half (0->50) and the second half (50->100) of the data space, using offsets:
  // dual channel hilbert matrix
  cv::Mat H(50, 100, CV_64FC2);
  for(int i = 0; i < H.rows; i++)
   for(int j = 0; j < H.cols; j++)
   {
       H.at<cv::Vec2d>(i,j)[0] =  1./(i+j+1);
       H.at<cv::Vec2d>(i,j)[1] = -1./(i+j+1);
   }
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // optimise dataset by two chunks
  int chunks[2] = { 50, 100 };
  // create 100x100 CV_64FC2 compressed space
  h5io->dscreate( 100, 100, CV_64FC2, "hilbert", 9, chunks );
  // write into first half
  int offset1[2] = { 0, 0 };
  h5io->dswrite( H, "hilbert", offset1 );
  // write into second half
  int offset2[2] = { 50, 0 };
  h5io->dswrite( H, "hilbert", offset2 );
  // release
  h5io->close();
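
  • A minimal Rust sketch of the first example above: write (or overwrite) a whole 100x100 CV_64FC2 matrix, with no dataset pre-creation; the Mat constructor name is an assumption based on these bindings:
  use opencv::core::{self, Mat, Scalar};
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      // a constant dual-channel matrix stands in for the hilbert matrix
      let h = Mat::new_rows_cols_with_default(100, 100, core::CV_64FC2, Scalar::all(1.0))?;
      let mut h5 = hdf::open("mytest.h5")?;
      // write / overwrite dataset
      h5.dswrite(&h, "hilbert")?;
      h5.close()?;
      Ok(())
  }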

pub fn dsinsert(&self, array: &dyn ToInputArray, dslabel: &str) -> Result<()>[src]

pub fn dsinsert_1(
    &self,
    array: &dyn ToInputArray,
    dslabel: &str,
    dims_offset: &i32
) -> Result<()>
[src]

pub fn dsinsert_2(
    &self,
    array: &dyn ToInputArray,
    dslabel: &str,
    dims_offset: &Vector<i32>,
    dims_counts: &Vector<i32>
) -> Result<()>
[src]

C++ default parameters

  • dims_counts: vector()

pub fn dsinsert_3(
    &self,
    array: &dyn ToInputArray,
    dslabel: &str,
    dims_offset: &i32,
    dims_counts: &i32
) -> Result<()>
[src]

Insert or overwrite a Mat object into the specified dataset, auto-expanding the dataset size if the unlimited property allows it.

Parameters

  • array: specify the Mat data array to be written.
  • dslabel: specify the target hdf5 dataset label.
  • dims_offset: each array member specifies the offset along the corresponding dataset dimension from where the InputArray will be (over)written into the dataset.
  • dims_counts: each array member specifies the amount of data along the corresponding dataset dimension from the InputArray that will be written into the dataset.

Writes a Mat object into the targeted dataset, auto-expanding dataset dimensions if allowed.

Note: Unlike dswrite(), datasets are not created automatically. Only Mat is supported and it must be continuous. If dsinsert() writes over an outer region of the dataset and the dataset is in unlimited mode on that dimension, the dataset is expanded, otherwise an exception is thrown. To create datasets with the unlimited property on one or more dimensions, see dscreate() and the optional H5_UNLIMITED flag at creation time. It is not thread safe over the same dataset, but multiple datasets can be merged inside a single hdf5 file.

  • The example below creates an unlimited-rows x 100-cols dataset and expands it five times with dsinsert(), inserting a single 100x100 CV_64FC2 matrix each time. The final dataset will have 500 (5x100) rows and 100 cols, reflecting the H matrix five times over the row span. The 100x100 chunk size is simply optimised for the H matrix size, with compression disabled. If the routine is called multiple times, the dataset will just be overwritten:
  // dual channel hilbert matrix
  cv::Mat H(100, 100, CV_64FC2);
  for(int i = 0; i < H.rows; i++)
   for(int j = 0; j < H.cols; j++)
   {
       H.at<cv::Vec2d>(i,j)[0] =  1./(i+j+1);
       H.at<cv::Vec2d>(i,j)[1] = -1./(i+j+1);
   }
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // optimise dataset by chunks
  int chunks[2] = { 100, 100 };
  // create Unlimited x 100 CV_64FC2 space
  h5io->dscreate( cv::hdf::HDF5::H5_UNLIMITED, 100, CV_64FC2, "hilbert", cv::hdf::HDF5::H5_NONE, chunks );
  // insert the matrix five times along the unlimited row dimension
  int offset[2] = { 0, 0 };
  for ( int t = 0; t < 5; t++ )
  {
   offset[0] = 100 * t;
   h5io->dsinsert( H, "hilbert", offset );
  }
  // release
  h5io->close();
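
  • A minimal Rust sketch of the expansion example above, using the Vector-based overloads dscreate_2()/dsinsert_2(); the H5_UNLIMITED and H5_NONE constants are assumed to be exposed as hdf::HDF5_H5_UNLIMITED and hdf::HDF5_H5_NONE:
  use opencv::core::{self, Mat, Scalar, Vector};
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      let h = Mat::new_rows_cols_with_default(100, 100, core::CV_64FC2, Scalar::all(1.0))?;
      let mut h5 = hdf::open("mytest.h5")?;
      if !h5.hlexists("hilbert")? {
          // unlimited rows require explicit chunking; chunk by 100x100 blocks
          let mut chunks = Vector::<i32>::new();
          chunks.push(100);
          chunks.push(100);
          h5.dscreate_2(hdf::HDF5_H5_UNLIMITED, 100, core::CV_64FC2, "hilbert",
                        hdf::HDF5_H5_NONE, &chunks)?;
      }
      // insert the matrix five times along the unlimited row dimension;
      // an empty counts vector keeps the C++ default (the whole input array)
      for t in 0..5i32 {
          let mut offset = Vector::<i32>::new();
          offset.push(t * 100);
          offset.push(0);
          h5.dsinsert_2(&h, "hilbert", &offset, &Vector::new())?;
      }
      h5.close()?;
      Ok(())
  }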

pub fn dsread(&self, array: &mut dyn ToOutputArray, dslabel: &str) -> Result<()>[src]

pub fn dsread_1(
    &self,
    array: &mut dyn ToOutputArray,
    dslabel: &str,
    dims_offset: &i32
) -> Result<()>
[src]

pub fn dsread_2(
    &self,
    array: &mut dyn ToOutputArray,
    dslabel: &str,
    dims_offset: &Vector<i32>,
    dims_counts: &Vector<i32>
) -> Result<()>
[src]

C++ default parameters

  • dims_counts: vector()

pub fn dsread_3(
    &self,
    array: &mut dyn ToOutputArray,
    dslabel: &str,
    dims_offset: &i32,
    dims_counts: &i32
) -> Result<()>
[src]

Read a specific dataset from an hdf5 file into a Mat object.

Parameters

  • array: Mat container where the read data will be returned.
  • dslabel: specify the source hdf5 dataset label.
  • dims_offset: each array member specifies the offset along the corresponding dimension from where the dataset starts to be read into the OutputArray.
  • dims_counts: each array member specifies the amount of data along the corresponding dataset dimension to read into the OutputArray.

Reads out a Mat object reflecting the stored dataset.

Note: If the hdf5 file does not exist an exception will be thrown. Use hlexists() to check dataset presence. It is thread safe.

  • Example below reads a dataset:
  // open hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // blank Mat container
  cv::Mat H;
  // read hilbert dataset
  h5io->dsread( H, "hilbert" );
  // release
  h5io->close();
  • The example below reads a 3x5 submatrix starting at the second row and third column:
  // open hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // blank Mat container
  cv::Mat H;
  int offset[2] = { 1, 2 };
  int counts[2] = { 3, 5 };
  // read a 3x5 block of the hilbert dataset
  h5io->dsread( H, "hilbert", offset, counts );
  // release
  h5io->close();
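
  • A minimal Rust sketch of both reads above: first the whole dataset, then a 3x5 window at offset (1, 2) via the dsread_2() overload; the Mat constructor name is an assumption based on these bindings:
  use opencv::core::{self, Mat, Scalar, Vector};
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      let mut h5 = hdf::open("mytest.h5")?;
      // placeholder containers; dsread() reallocates them to fit the data
      let mut full = Mat::new_rows_cols_with_default(1, 1, core::CV_64F, Scalar::all(0.0))?;
      h5.dsread(&mut full, "hilbert")?;
      let mut window = Mat::new_rows_cols_with_default(1, 1, core::CV_64F, Scalar::all(0.0))?;
      let mut offset = Vector::<i32>::new();
      offset.push(1);
      offset.push(2);
      let mut counts = Vector::<i32>::new();
      counts.push(3);
      counts.push(5);
      h5.dsread_2(&mut window, "hilbert", &offset, &counts)?;
      h5.close()?;
      Ok(())
  }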

pub fn kpgetsize(&self, kplabel: &str, dims_flag: i32) -> Result<i32>[src]

Fetch keypoint dataset size

Parameters

  • kplabel: specify the hdf5 dataset label to be measured.
  • dims_flag: will fetch dataset dimensions on H5_GETDIMS, and dataset maximum dimensions on H5_GETMAXDIMS.

Returns size of keypoints dataset.

Note: The resulting size will match the number of keypoints. By default, H5_GETDIMS returns the actual dataset dimension. The H5_GETMAXDIMS flag returns the maximum allowed dimension, which normally matches the actual dataset dimension but can hold H5_UNLIMITED if the dataset was prepared in unlimited mode. It can be useful to check the existing dataset dimension before overwriting it as a whole or in a subset. Trying to write oversized source data into the target dataset will throw an exception.

C++ default parameters

  • dims_flag: HDF5::H5_GETDIMS

pub fn kpcreate(
    &self,
    size: i32,
    kplabel: &str,
    compresslevel: i32,
    chunks: i32
) -> Result<()>
[src]

Create and allocate special storage for cv::KeyPoint dataset.

Parameters

  • size: declare fixed number of KeyPoints
  • kplabel: specify the hdf5 dataset label, any existing dataset with the same label will be overwritten.
  • compresslevel: specify the compression level 0-9 to be used, H5_NONE is default and means no compression.
  • chunks: each array member specifies the chunking size to be used for block I/O; H5_NONE is the default and means no chunking.

Note: If the dataset already exists an exception will be thrown. Existence of the dataset can be checked using hlexists().

  • See example below that creates space for 100 keypoints in the dataset:
  // open hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  if ( ! h5io->hlexists( "keypoints" ) )
   h5io->kpcreate( 100, "keypoints" );
  else
   printf("DS already created, skipping\n" );

Note: A value of H5_UNLIMITED for size means unlimited keypoints, thus it is possible to expand such a dataset at any time by adding or inserting. Presence of H5_UNLIMITED requires defining custom chunking. No default chunking will be defined in the unlimited scenario, since the default size on that dimension will be zero and will grow once the dataset is written. Writing into a dataset that has H5_UNLIMITED on some of its dimensions requires kpinsert(), which allows growth on the unlimited dimension, instead of kpwrite(), which can write only within the predefined data space.

  • See example below that creates unlimited space for keypoints with a chunking size of 100 but no compression:
  // open hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  if ( ! h5io->hlexists( "keypoints" ) )
   h5io->kpcreate( cv::hdf::HDF5::H5_UNLIMITED, "keypoints", cv::hdf::HDF5::H5_NONE, 100 );
  else
   printf("DS already created, skipping\n" );

C++ default parameters

  • compresslevel: H5_NONE
  • chunks: H5_NONE

pub fn kpwrite(
    &self,
    keypoints: Vector<KeyPoint>,
    kplabel: &str,
    offset: i32,
    counts: i32
) -> Result<()>
[src]

Write or overwrite a list of KeyPoint into the specified dataset of an hdf5 file.

Parameters

  • keypoints: specify the keypoints data list to be written.
  • kplabel: specify the target hdf5 dataset label.
  • offset: specify the offset location in the dataset from where keypoints will be (over)written.
  • counts: specify the number of keypoints that will be written into the dataset.

Writes a vector object into the targeted dataset.

Note: If the dataset is not created and does not exist it will be created automatically. It is thread safe, but it is recommended that writes happen over separate non-overlapping regions. Multiple datasets can be written inside a single hdf5 file.

  • The example below writes 100 keypoints into a dataset. No dataset pre-creation is required. If the routine is called multiple times, the dataset will just be overwritten:
  // generate 100 dummy keypoints
  std::vector<cv::KeyPoint> keypoints;
  for(int i = 0; i < 100; i++)
   keypoints.push_back( cv::KeyPoint(i, -i, 1, -1, 0, 0, -1) );
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // write / overwrite dataset
  h5io->kpwrite( keypoints, "keypoints" );
  // release
  h5io->close();
  • The example below uses a smaller set of 50 keypoints and writes into a compressed space of 100 keypoints optimised by 10 chunks. The same keypoint set is written three times: first the full set into the first half (0->50), then the first 25 keypoints into the third quarter (50->75), then the first 25 again into the remaining slots (75->100), using the offset and count parameters to settle the window for write access. If the routine is called multiple times, the dataset will just be overwritten:
  // generate 50 dummy keypoints
  std::vector<cv::KeyPoint> keypoints;
  for(int i = 0; i < 50; i++)
   keypoints.push_back( cv::KeyPoint(i, -i, 1, -1, 0, 0, -1) );
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // create maximum compressed space of size 100 with chunk size 10
  h5io->kpcreate( 100, "keypoints", 9, 10 );
  // write into first half
  h5io->kpwrite( keypoints, "keypoints", 0 );
  // write first 25 keypoints into second half
  h5io->kpwrite( keypoints, "keypoints", 50, 25 );
  // write first 25 keypoints into remained space of second half
  h5io->kpwrite( keypoints, "keypoints", 75, 25 );
  // release
  h5io->close();

C++ default parameters

  • offset: H5_NONE
  • counts: H5_NONE
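
  • A minimal Rust sketch writing 100 dummy keypoints; KeyPoint::new_coords() and the hdf::HDF5_H5_NONE default constant are assumptions based on these bindings:
  use opencv::core::{KeyPoint, Vector};
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      // generate 100 dummy keypoints
      let mut keypoints = Vector::<KeyPoint>::new();
      for i in 0..100 {
          keypoints.push(KeyPoint::new_coords(i as f32, -(i as f32), 1.0, -1.0, 0.0, 0, -1)?);
      }
      let mut h5 = hdf::open("mytest.h5")?;
      // write / overwrite the whole dataset (H5_NONE keeps the C++ defaults)
      h5.kpwrite(keypoints, "keypoints", hdf::HDF5_H5_NONE, hdf::HDF5_H5_NONE)?;
      h5.close()?;
      Ok(())
  }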

pub fn kpinsert(
    &self,
    keypoints: Vector<KeyPoint>,
    kplabel: &str,
    offset: i32,
    counts: i32
) -> Result<()>
[src]

Insert or overwrite a list of KeyPoint into the specified dataset, auto-expanding the dataset size if the unlimited property allows it.

Parameters

  • keypoints: specify the keypoints data list to be written.
  • kplabel: specify the target hdf5 dataset label.
  • offset: specify the offset location in the dataset from where keypoints will be (over)written.
  • counts: specify the number of keypoints that will be written into the dataset.

Writes a vector object into the targeted dataset, auto-expanding the dataset dimension if allowed.

Note: Unlike kpwrite(), datasets are not created automatically. If kpinsert() happens over an outer region of the dataset and the dataset has been created in unlimited mode, then the dataset is expanded, otherwise an exception is thrown. To create datasets with the unlimited property, see kpcreate() and the optional H5_UNLIMITED flag at creation time. It is not thread safe over the same dataset, but multiple datasets can be merged inside a single hdf5 file.

  • The example below creates unlimited space for keypoint storage, and inserts the same list of 10 keypoints ten times into that space. The final dataset will have 100 keypoints. The chunk size of 10 is simply optimised for the keypoint list. If the routine is called multiple times, the dataset will just be overwritten:
  // generate 10 dummy keypoints
  std::vector<cv::KeyPoint> keypoints;
  for(int i = 0; i < 10; i++)
   keypoints.push_back( cv::KeyPoint(i, -i, 1, -1, 0, 0, -1) );
  // open / autocreate hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // create unlimited size space with chunk size of 10
  h5io->kpcreate( cv::hdf::HDF5::H5_UNLIMITED, "keypoints", cv::hdf::HDF5::H5_NONE, 10 );
  // insert 10 times same 10 keypoints
  for(int i = 0; i < 10; i++)
   h5io->kpinsert( keypoints, "keypoints", i * 10 );
  // release
  h5io->close();

C++ default parameters

  • offset: H5_NONE
  • counts: H5_NONE

pub fn kpread(
    &self,
    keypoints: &mut Vector<KeyPoint>,
    kplabel: &str,
    offset: i32,
    counts: i32
) -> Result<()>
[src]

Read a specific keypoint dataset from an hdf5 file into a vector object.

Parameters

  • keypoints: vector container where the read data will be returned.
  • kplabel: specify the source hdf5 dataset label.
  • offset: specify the offset location in the dataset from where the read starts.
  • counts: specify the number of keypoints to read from the dataset.

Reads out a vector object reflecting the stored dataset.

Note: If the hdf5 file does not exist an exception will be thrown. Use hlexists() to check dataset presence. It is thread safe.

  • The example below reads a dataset containing keypoints, starting with the second entry:
  // open hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // blank KeyPoint container
  std::vector<cv::KeyPoint> keypoints;
  // read all keypoints starting with the second one
  h5io->kpread( keypoints, "keypoints", 1 );
  // release
  h5io->close();
  • The example below reads 3 keypoints starting at the second entry:
  // open hdf5 file
  cv::Ptr<cv::hdf::HDF5> h5io = cv::hdf::open( "mytest.h5" );
  // blank KeyPoint container
  std::vector<cv::KeyPoint> keypoints;
  // read three keypoints starting second one
  h5io->kpread( keypoints, "keypoints", 1, 3 );
  // release
  h5io->close();

C++ default parameters

  • offset: H5_NONE
  • counts: H5_NONE
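
  • A minimal Rust sketch of the windowed read above (three keypoints starting at the second entry):
  use opencv::core::{KeyPoint, Vector};
  use opencv::hdf::{self, HDF5};

  fn main() -> opencv::Result<()> {
      let mut h5 = hdf::open("mytest.h5")?;
      let mut keypoints = Vector::<KeyPoint>::new();
      // read three keypoints starting with the second one
      h5.kpread(&mut keypoints, "keypoints", 1, 3)?;
      h5.close()?;
      Ok(())
  }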

Implementors

impl HDF5 for PtrOfHDF5[src]
