Trait zarrs::array::chunk_grid::ChunkGridTraits

source ·
pub trait ChunkGridTraits: DynClone + Debug + Send + Sync {
Show 19 methods // Required methods fn create_metadata(&self) -> Metadata; fn dimensionality(&self) -> usize; unsafe fn grid_shape_unchecked( &self, array_shape: &[u64] ) -> Option<ArrayShape>; unsafe fn chunk_origin_unchecked( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Option<ArrayIndices>; unsafe fn chunk_shape_unchecked( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Option<ChunkShape>; unsafe fn chunk_shape_u64_unchecked( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Option<ArrayShape>; unsafe fn chunk_indices_unchecked( &self, array_indices: &[u64], array_shape: &[u64] ) -> Option<ArrayIndices>; unsafe fn chunk_element_indices_unchecked( &self, array_indices: &[u64], array_shape: &[u64] ) -> Option<ArrayIndices>; // Provided methods fn grid_shape( &self, array_shape: &[u64] ) -> Result<Option<ArrayShape>, IncompatibleDimensionalityError> { ... } fn chunk_shape( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ChunkShape>, IncompatibleDimensionalityError> { ... } fn chunk_shape_u64( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ArrayShape>, IncompatibleDimensionalityError> { ... } fn chunk_origin( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ArrayIndices>, IncompatibleDimensionalityError> { ... } fn subset( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ArraySubset>, IncompatibleDimensionalityError> { ... } fn chunks_subset( &self, chunks: &ArraySubset, array_shape: &[u64] ) -> Result<Option<ArraySubset>, IncompatibleDimensionalityError> { ... } fn chunk_indices( &self, array_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ArrayIndices>, IncompatibleDimensionalityError> { ... } fn chunk_element_indices( &self, array_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ArrayIndices>, IncompatibleDimensionalityError> { ... } fn array_indices_inbounds( &self, array_indices: &[u64], array_shape: &[u64] ) -> bool { ... 
} fn chunk_indices_inbounds( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> bool { ... } unsafe fn subset_unchecked( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Option<ArraySubset> { ... }
}
Expand description

Chunk grid traits.

Required Methods§

source

fn create_metadata(&self) -> Metadata

Create metadata.

source

fn dimensionality(&self) -> usize

The dimensionality of the grid.

source

unsafe fn grid_shape_unchecked(&self, array_shape: &[u64]) -> Option<ArrayShape>

See ChunkGridTraits::grid_shape.

§Safety

The length of array_shape must match the dimensionality of the chunk grid.

source

unsafe fn chunk_origin_unchecked( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Option<ArrayIndices>

See ChunkGridTraits::chunk_origin.

§Safety

The length of chunk_indices and array_shape must match the dimensionality of the chunk grid.

source

unsafe fn chunk_shape_unchecked( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Option<ChunkShape>

See ChunkGridTraits::chunk_shape.

§Safety

The length of chunk_indices and array_shape must match the dimensionality of the chunk grid.

source

unsafe fn chunk_shape_u64_unchecked( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Option<ArrayShape>

See ChunkGridTraits::chunk_shape_u64.

§Safety

The length of chunk_indices and array_shape must match the dimensionality of the chunk grid.

source

unsafe fn chunk_indices_unchecked( &self, array_indices: &[u64], array_shape: &[u64] ) -> Option<ArrayIndices>

See ChunkGridTraits::chunk_indices.

§Safety

The length of array_indices and array_shape must match the dimensionality of the chunk grid.

source

unsafe fn chunk_element_indices_unchecked( &self, array_indices: &[u64], array_shape: &[u64] ) -> Option<ArrayIndices>

See ChunkGridTraits::chunk_element_indices.

§Safety

The length of array_indices and array_shape must match the dimensionality of the chunk grid.

Provided Methods§

source

fn grid_shape( &self, array_shape: &[u64] ) -> Result<Option<ArrayShape>, IncompatibleDimensionalityError>

The grid shape (i.e. number of chunks).

Zero sized array dimensions are considered “unlimited”. The grid shape will be unlimited where the array shape is unlimited, if supported by the chunk grid. Returns None if the grid shape cannot be determined, likely due to an incompatibility with the array_shape.

§Errors

Returns IncompatibleDimensionalityError if the length of array_shape does not match the dimensionality of the chunk grid.

source

fn chunk_shape( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ChunkShape>, IncompatibleDimensionalityError>

The shape of the chunk at chunk_indices.

Returns None if the shape of the chunk at chunk_indices cannot be determined.

§Errors

Returns IncompatibleDimensionalityError if the length of chunk_indices or array_shape does not match the dimensionality of the chunk grid.

Examples found in repository?
examples/rectangular_array_write_read.rs (line 84)
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
/// Example: write and read a zarr array that uses a rectangular
/// (non-uniform) chunk grid — rows are chunked as [1, 2, 3, 2] and
/// columns are chunked regularly with size 4.
///
/// Returns an error if any store, metadata, or chunk operation fails.
fn rectangular_array_write_read() -> Result<(), Box<dyn std::error::Error>> {
    use rayon::prelude::{IntoParallelIterator, ParallelIterator};
    use zarrs::array::ChunkGrid;
    use zarrs::{
        array::{chunk_grid::RectangularChunkGrid, codec, FillValue},
        node::Node,
    };
    use zarrs::{
        array::{DataType, ZARR_NAN_F32},
        array_subset::ArraySubset,
        storage::store,
    };

    // Create a store
    // let path = tempfile::TempDir::new()?;
    // let mut store: ReadableWritableListableStorage = Arc::new(store::FilesystemStore::new(path.path())?);
    let mut store: ReadableWritableListableStorage = std::sync::Arc::new(store::MemoryStore::new());
    // Optionally wrap the store in a usage-logging transformer when the
    // first CLI argument is `--usage-log`.
    if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1) {
        if arg1 == "--usage-log" {
            let log_writer = Arc::new(std::sync::Mutex::new(
                // std::io::BufWriter::new(
                std::io::stdout(),
                //    )
            ));
            let usage_log = Arc::new(UsageLogStorageTransformer::new(log_writer, || {
                chrono::Utc::now().format("[%T%.3f] ").to_string()
            }));
            store = usage_log
                .clone()
                .create_readable_writable_listable_transformer(store);
        }
    }

    // Create a group
    let group_path = "/group";
    let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;

    // Update group metadata
    group
        .attributes_mut()
        .insert("foo".into(), serde_json::Value::String("bar".into()));

    // Write group metadata to store
    group.store_metadata()?;

    println!(
        "The group metadata is:\n{}\n",
        serde_json::to_string_pretty(&group.metadata()).unwrap()
    );

    // Create an array
    let array_path = "/group/array";
    let array = zarrs::array::ArrayBuilder::new(
        vec![8, 8], // array shape
        DataType::Float32,
        // Rectangular grid: row chunk sizes 1, 2, 3, 2 (sum to 8);
        // columns use a single regular size of 4.
        ChunkGrid::new(RectangularChunkGrid::new(&[
            [1, 2, 3, 2].try_into()?,
            4.try_into()?,
        ])),
        FillValue::from(ZARR_NAN_F32),
    )
    .bytes_to_bytes_codecs(vec![
        #[cfg(feature = "gzip")]
        Box::new(codec::GzipCodec::new(5)?),
    ])
    .dimension_names(["y", "x"].into())
    // .storage_transformers(vec![].into())
    .build(store.clone(), array_path)?;

    // Write array metadata to store
    array.store_metadata()?;

    // Write some chunks (in parallel)
    // The chunk shape is queried per chunk because a rectangular grid
    // has chunks of differing sizes.
    (0..4).into_par_iter().try_for_each(|i| {
        let chunk_grid = array.chunk_grid();
        let chunk_indices = vec![i, 0];
        if let Some(chunk_shape) = chunk_grid.chunk_shape(&chunk_indices, array.shape())? {
            let chunk_array = ndarray::ArrayD::<f32>::from_elem(
                chunk_shape
                    .iter()
                    .map(|u| u.get() as usize)
                    .collect::<Vec<_>>(),
                i as f32,
            );
            array.store_chunk_ndarray(&chunk_indices, chunk_array)
        } else {
            Err(zarrs::array::ArrayError::InvalidChunkGridIndicesError(
                chunk_indices.to_vec(),
            ))
        }
    })?;

    println!(
        "The array metadata is:\n{}\n",
        serde_json::to_string_pretty(&array.metadata()).unwrap()
    );

    // Write a subset spanning multiple chunks, including updating chunks already written
    array.store_array_subset_ndarray(
        &[3, 3], // start
        ndarray::ArrayD::<f32>::from_shape_vec(
            vec![3, 3],
            vec![0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
        )?,
    )?;

    // Store elements directly, in this case set the 7th column to 123.0
    array.store_array_subset_elements::<f32>(
        &ArraySubset::new_with_ranges(&[0..8, 6..7]),
        vec![123.0; 8],
    )?;

    // Store elements directly in a chunk, in this case set the last row of the bottom right chunk
    array.store_chunk_subset_elements::<f32>(
        // chunk indices
        &[3, 1],
        // subset within chunk
        &ArraySubset::new_with_ranges(&[1..2, 0..4]),
        vec![-4.0; 4],
    )?;

    // Read the whole array
    let subset_all = ArraySubset::new_with_shape(array.shape().to_vec());
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("The whole array is:\n{data_all}\n");

    // Read a chunk back from the store
    let chunk_indices = vec![1, 0];
    let data_chunk = array.retrieve_chunk_ndarray::<f32>(&chunk_indices)?;
    println!("Chunk [1,0] is:\n{data_chunk}\n");

    // Read the central 4x2 subset of the array
    let subset_4x2 = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
    let data_4x2 = array.retrieve_array_subset_ndarray::<f32>(&subset_4x2)?;
    println!("The middle 4x2 subset is:\n{data_4x2}\n");

    // Show the hierarchy
    let node = Node::new(&*store, "/").unwrap();
    let tree = node.hierarchy_tree();
    println!("The zarr hierarchy tree is:\n{tree}");

    Ok(())
}
More examples
Hide additional examples
examples/sharded_array_write_read.rs (line 91)
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
/// Example: write and read a zarr array that uses the sharding codec,
/// storing 4x8 shards that each contain 4x4 inner chunks, then decode
/// inner chunks directly via a partial decoder.
///
/// Returns an error if any store, metadata, or chunk operation fails.
fn sharded_array_write_read() -> Result<(), Box<dyn std::error::Error>> {
    use zarrs::{
        array::{
            codec::{self, array_to_bytes::sharding::ShardingCodecBuilder},
            DataType, FillValue,
        },
        array_subset::ArraySubset,
        node::Node,
        storage::store,
    };

    use rayon::prelude::{IntoParallelIterator, ParallelIterator};
    use std::sync::Arc;

    // Create a store
    // let path = tempfile::TempDir::new()?;
    // let mut store: ReadableWritableListableStorage = Arc::new(store::FilesystemStore::new(path.path())?);
    // let mut store: ReadableWritableListableStorage = Arc::new(store::FilesystemStore::new("tests/data/sharded_array_write_read.zarr")?);
    let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
    // Optionally wrap the store in a usage-logging transformer when the
    // first CLI argument is `--usage-log`.
    if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1) {
        if arg1 == "--usage-log" {
            let log_writer = Arc::new(std::sync::Mutex::new(
                // std::io::BufWriter::new(
                std::io::stdout(),
                //    )
            ));
            let usage_log = Arc::new(UsageLogStorageTransformer::new(log_writer, || {
                chrono::Utc::now().format("[%T%.3f] ").to_string()
            }));
            store = usage_log
                .clone()
                .create_readable_writable_listable_transformer(store);
        }
    }

    // Create a group
    let group_path = "/group";
    let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;

    // Update group metadata
    group
        .attributes_mut()
        .insert("foo".into(), serde_json::Value::String("bar".into()));

    // Write group metadata to store
    group.store_metadata()?;

    // Create an array
    let array_path = "/group/array";
    // Each shard (outer chunk) is 4x8 and holds 4x4 inner chunks.
    let shard_shape = vec![4, 8];
    let inner_chunk_shape = vec![4, 4];
    let mut sharding_codec_builder =
        ShardingCodecBuilder::new(inner_chunk_shape.as_slice().try_into()?);
    sharding_codec_builder.bytes_to_bytes_codecs(vec![
        #[cfg(feature = "gzip")]
        Box::new(codec::GzipCodec::new(5)?),
    ]);
    let array = zarrs::array::ArrayBuilder::new(
        vec![8, 8], // array shape
        DataType::UInt16,
        shard_shape.try_into()?,
        FillValue::from(0u16),
    )
    .array_to_bytes_codec(Box::new(sharding_codec_builder.build()))
    .dimension_names(["y", "x"].into())
    // .storage_transformers(vec![].into())
    .build(store.clone(), array_path)?;

    // Write array metadata to store
    array.store_metadata()?;

    // The array metadata is
    println!(
        "The array metadata is:\n{}\n",
        serde_json::to_string_pretty(&array.metadata()).unwrap()
    );

    // Write some shards (in parallel)
    // Each element is filled with its linearised index within the array.
    (0..2).into_par_iter().try_for_each(|s| {
        let chunk_grid = array.chunk_grid();
        let chunk_indices = vec![s, 0];
        if let Some(chunk_shape) = chunk_grid.chunk_shape(&chunk_indices, array.shape())? {
            let chunk_array = ndarray::ArrayD::<u16>::from_shape_fn(
                chunk_shape
                    .iter()
                    .map(|u| u.get() as usize)
                    .collect::<Vec<_>>(),
                |ij| {
                    (s * chunk_shape[0].get() * chunk_shape[1].get()
                        + ij[0] as u64 * chunk_shape[1].get()
                        + ij[1] as u64) as u16
                },
            );
            array.store_chunk_ndarray(&chunk_indices, chunk_array)
        } else {
            Err(zarrs::array::ArrayError::InvalidChunkGridIndicesError(
                chunk_indices.to_vec(),
            ))
        }
    })?;

    // Read the whole array
    let subset_all = ArraySubset::new_with_shape(array.shape().to_vec()); // the whole 8x8 array
    let data_all = array.retrieve_array_subset_ndarray::<u16>(&subset_all)?;
    println!("The whole array is:\n{data_all}\n");

    // Read a shard back from the store
    let shard_indices = vec![1, 0];
    let data_shard = array.retrieve_chunk_ndarray::<u16>(&shard_indices)?;
    println!("Shard [1,0] is:\n{data_shard}\n");

    // Read an inner chunk from the store
    let subset_chunk_1_0 = ArraySubset::new_with_ranges(&[4..8, 0..4]);
    let data_chunk = array.retrieve_array_subset_ndarray::<u16>(&subset_chunk_1_0)?;
    println!("Chunk [1,0] is:\n{data_chunk}\n");

    // Read the central 4x2 subset of the array
    let subset_4x2 = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
    let data_4x2 = array.retrieve_array_subset_ndarray::<u16>(&subset_4x2)?;
    println!("The middle 4x2 subset is:\n{data_4x2}\n");

    // Decode inner chunks
    // In some cases, it might be preferable to decode inner chunks in a shard directly.
    // If using the partial decoder, then the shard index will only be read once from the store.
    let partial_decoder = array.partial_decoder(&[0, 0])?;
    let inner_chunks_to_decode = vec![
        ArraySubset::new_with_start_shape(vec![0, 0], inner_chunk_shape.clone())?,
        ArraySubset::new_with_start_shape(vec![0, 4], inner_chunk_shape.clone())?,
    ];
    let decoded_inner_chunks_bytes = partial_decoder.partial_decode(&inner_chunks_to_decode)?;
    let decoded_inner_chunks_ndarray = decoded_inner_chunks_bytes
        .into_iter()
        .map(|bytes| bytes_to_ndarray::<u16>(&inner_chunk_shape, bytes))
        .collect::<Result<Vec<_>, _>>()?;
    println!("Decoded inner chunks:");
    for (inner_chunk_subset, decoded_inner_chunk) in
        std::iter::zip(inner_chunks_to_decode, decoded_inner_chunks_ndarray)
    {
        println!("{inner_chunk_subset}\n{decoded_inner_chunk}\n");
    }

    // Show the hierarchy
    let node = Node::new(&*store, "/").unwrap();
    let tree = node.hierarchy_tree();
    println!("The zarr hierarchy tree is:\n{}", tree);

    println!(
        "The keys in the store are:\n[{}]",
        store.list().unwrap_or_default().iter().format(", ")
    );

    Ok(())
}
source

fn chunk_shape_u64( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ArrayShape>, IncompatibleDimensionalityError>

The shape of the chunk at chunk_indices as an ArrayShape (Vec<u64>).

Returns None if the shape of the chunk at chunk_indices cannot be determined.

§Errors

Returns IncompatibleDimensionalityError if chunk_indices or array_shape do not match the dimensionality of the chunk grid.

source

fn chunk_origin( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ArrayIndices>, IncompatibleDimensionalityError>

The origin of the chunk at chunk_indices.

Returns None if the chunk origin cannot be determined.

§Errors

Returns IncompatibleDimensionalityError if the length of chunk_indices or array_shape does not match the dimensionality of the chunk grid.

source

fn subset( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ArraySubset>, IncompatibleDimensionalityError>

Return the ArraySubset of the chunk at chunk_indices.

Returns None if the chunk subset cannot be determined.

§Errors

Returns IncompatibleDimensionalityError if chunk_indices or array_shape do not match the dimensionality of the chunk grid.

Examples found in repository?
examples/array_write_read.rs (line 82)
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
/// Example: write and read an 8x8 float32 zarr array with a regular 4x4
/// chunk grid, exercising chunk, multi-chunk, subset, and element-level
/// store/retrieve operations.
///
/// Returns an error if any store, metadata, or chunk operation fails.
fn array_write_read() -> Result<(), Box<dyn std::error::Error>> {
    use std::sync::Arc;
    use zarrs::{
        array::{DataType, FillValue, ZARR_NAN_F32},
        array_subset::ArraySubset,
        node::Node,
        storage::store,
    };

    // Create a store
    // let path = tempfile::TempDir::new()?;
    // let mut store: ReadableWritableListableStorage = Arc::new(store::FilesystemStore::new(path.path())?);
    // let mut store: ReadableWritableListableStorage = Arc::new(store::FilesystemStore::new(
    //     "tests/data/array_write_read.zarr",
    // )?);
    let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
    // Optionally wrap the store in a usage-logging transformer when the
    // first CLI argument is `--usage-log`.
    if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1) {
        if arg1 == "--usage-log" {
            let log_writer = Arc::new(std::sync::Mutex::new(
                // std::io::BufWriter::new(
                std::io::stdout(),
                //    )
            ));
            let usage_log = Arc::new(UsageLogStorageTransformer::new(log_writer, || {
                chrono::Utc::now().format("[%T%.3f] ").to_string()
            }));
            store = usage_log
                .clone()
                .create_readable_writable_listable_transformer(store);
        }
    }

    // Create a group
    let group_path = "/group";
    let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;

    // Update group metadata
    group
        .attributes_mut()
        .insert("foo".into(), serde_json::Value::String("bar".into()));

    // Write group metadata to store
    group.store_metadata()?;

    println!(
        "The group metadata is:\n{}\n",
        serde_json::to_string_pretty(&group.metadata()).unwrap()
    );

    // Create an array
    let array_path = "/group/array";
    let array = zarrs::array::ArrayBuilder::new(
        vec![8, 8], // array shape
        DataType::Float32,
        vec![4, 4].try_into()?, // regular chunk shape
        FillValue::from(ZARR_NAN_F32),
    )
    // .bytes_to_bytes_codecs(vec![]) // uncompressed
    .dimension_names(["y", "x"].into())
    // .storage_transformers(vec![].into())
    .build(store.clone(), array_path)?;

    // Write array metadata to store
    array.store_metadata()?;

    println!(
        "The array metadata is:\n{}\n",
        serde_json::to_string_pretty(&array.metadata()).unwrap()
    );

    // Write some chunks
    // The chunk subset is queried so the element count matches the chunk.
    (0..2).into_par_iter().try_for_each(|i| {
        let chunk_indices: Vec<u64> = vec![0, i];
        let chunk_subset = array
            .chunk_grid()
            .subset(&chunk_indices, array.shape())?
            .ok_or_else(|| {
                zarrs::array::ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec())
            })?;
        array.store_chunk_elements(
            &chunk_indices,
            vec![i as f32 * 0.1; chunk_subset.num_elements() as usize],
        )
    })?;

    let subset_all = ArraySubset::new_with_shape(array.shape().to_vec());
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("store_chunk [0, 0] and [0, 1]:\n{data_all:+4.1}\n");

    // Store multiple chunks
    array.store_chunks_elements::<f32>(
        &ArraySubset::new_with_ranges(&[1..2, 0..2]),
        vec![
            //
            1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
            //
            1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
        ],
    )?;
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("store_chunks [1..2, 0..2]:\n{data_all:+4.1}\n");

    // Write a subset spanning multiple chunks, including updating chunks already written
    array.store_array_subset_elements::<f32>(
        &ArraySubset::new_with_ranges(&[3..6, 3..6]),
        vec![-3.3, -3.4, -3.5, -4.3, -4.4, -4.5, -5.3, -5.4, -5.5],
    )?;
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("store_array_subset [3..6, 3..6]:\n{data_all:+4.1}\n");

    // Store array subset
    array.store_array_subset_elements::<f32>(
        &ArraySubset::new_with_ranges(&[0..8, 6..7]),
        vec![-0.6, -1.6, -2.6, -3.6, -4.6, -5.6, -6.6, -7.6],
    )?;
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("store_array_subset [0..8, 6..7]:\n{data_all:+4.1}\n");

    // Store chunk subset
    array.store_chunk_subset_elements::<f32>(
        // chunk indices
        &[1, 1],
        // subset within chunk
        &ArraySubset::new_with_ranges(&[3..4, 0..4]),
        vec![-7.4, -7.5, -7.6, -7.7],
    )?;
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("store_chunk_subset [3..4, 0..4] of chunk [1, 1]:\n{data_all:+4.1}\n");

    // Erase a chunk
    array.erase_chunk(&[0, 0])?;
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("erase_chunk [0, 0]:\n{data_all:+4.1}\n");

    // Read a chunk
    let chunk_indices = vec![0, 1];
    let data_chunk = array.retrieve_chunk_ndarray::<f32>(&chunk_indices)?;
    println!("retrieve_chunk [0, 1]:\n{data_chunk:+4.1}\n");

    // Read chunks
    let chunks = ArraySubset::new_with_ranges(&[0..2, 1..2]);
    let data_chunks = array.retrieve_chunks_ndarray::<f32>(&chunks)?;
    println!("retrieve_chunks [0..2, 1..2]:\n{data_chunks:+4.1}\n");

    // Retrieve an array subset
    let subset = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
    let data_subset = array.retrieve_array_subset_ndarray::<f32>(&subset)?;
    println!("retrieve_array_subset [2..6, 3..5]:\n{data_subset:+4.1}\n");

    // Show the hierarchy
    let node = Node::new(&*store, "/").unwrap();
    let tree = node.hierarchy_tree();
    println!("hierarchy_tree:\n{}", tree);

    Ok(())
}
More examples
Hide additional examples
examples/array_write_read_ndarray.rs (line 83)
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
/// Example: write and read an 8x8 float32 zarr array with a regular 4x4
/// chunk grid, using `ndarray` types (`ArrayD`/`Array2`) for the data
/// passed to and returned from the store/retrieve operations.
///
/// Returns an error if any store, metadata, or chunk operation fails.
fn array_write_read() -> Result<(), Box<dyn std::error::Error>> {
    use std::sync::Arc;
    use zarrs::{
        array::{DataType, FillValue, ZARR_NAN_F32},
        array_subset::ArraySubset,
        node::Node,
        storage::store,
    };

    // Create a store
    // let path = tempfile::TempDir::new()?;
    // let mut store: ReadableWritableListableStorage = Arc::new(store::FilesystemStore::new(path.path())?);
    // let mut store: ReadableWritableListableStorage = Arc::new(store::FilesystemStore::new(
    //     "tests/data/array_write_read.zarr",
    // )?);
    let mut store: ReadableWritableListableStorage = Arc::new(store::MemoryStore::new());
    // Optionally wrap the store in a usage-logging transformer when the
    // first CLI argument is `--usage-log`.
    if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1) {
        if arg1 == "--usage-log" {
            let log_writer = Arc::new(std::sync::Mutex::new(
                // std::io::BufWriter::new(
                std::io::stdout(),
                //    )
            ));
            let usage_log = Arc::new(UsageLogStorageTransformer::new(log_writer, || {
                chrono::Utc::now().format("[%T%.3f] ").to_string()
            }));
            store = usage_log
                .clone()
                .create_readable_writable_listable_transformer(store);
        }
    }

    // Create a group
    let group_path = "/group";
    let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;

    // Update group metadata
    group
        .attributes_mut()
        .insert("foo".into(), serde_json::Value::String("bar".into()));

    // Write group metadata to store
    group.store_metadata()?;

    println!(
        "The group metadata is:\n{}\n",
        serde_json::to_string_pretty(&group.metadata()).unwrap()
    );

    // Create an array
    let array_path = "/group/array";
    let array = zarrs::array::ArrayBuilder::new(
        vec![8, 8], // array shape
        DataType::Float32,
        vec![4, 4].try_into()?, // regular chunk shape
        FillValue::from(ZARR_NAN_F32),
    )
    // .bytes_to_bytes_codecs(vec![]) // uncompressed
    .dimension_names(["y", "x"].into())
    // .storage_transformers(vec![].into())
    .build(store.clone(), array_path)?;

    // Write array metadata to store
    array.store_metadata()?;

    println!(
        "The array metadata is:\n{}\n",
        serde_json::to_string_pretty(&array.metadata()).unwrap()
    );

    // Write some chunks
    // The chunk subset provides the shape/element count for the ndarray.
    (0..2).into_par_iter().try_for_each(|i| {
        let chunk_indices: Vec<u64> = vec![0, i];
        let chunk_subset = array
            .chunk_grid()
            .subset(&chunk_indices, array.shape())?
            .ok_or_else(|| {
                zarrs::array::ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec())
            })?;
        array.store_chunk_ndarray(
            &chunk_indices,
            ArrayD::<f32>::from_shape_vec(
                chunk_subset.shape_usize(),
                vec![i as f32 * 0.1; chunk_subset.num_elements() as usize],
            )
            .unwrap(),
        )
    })?;

    let subset_all = ArraySubset::new_with_shape(array.shape().to_vec());
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("store_chunk [0, 0] and [0, 1]:\n{data_all:+4.1}\n");

    // Store multiple chunks
    let ndarray_chunks: Array2<f32> = array![
        [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
        [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
        [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
        [1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,],
    ];
    array.store_chunks_ndarray(&ArraySubset::new_with_ranges(&[1..2, 0..2]), ndarray_chunks)?;
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("store_chunks [1..2, 0..2]:\n{data_all:+4.1}\n");

    // Write a subset spanning multiple chunks, including updating chunks already written
    let ndarray_subset: Array2<f32> =
        array![[-3.3, -3.4, -3.5,], [-4.3, -4.4, -4.5,], [-5.3, -5.4, -5.5],];
    array.store_array_subset_ndarray(
        ArraySubset::new_with_ranges(&[3..6, 3..6]).start(),
        ndarray_subset,
    )?;
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("store_array_subset [3..6, 3..6]:\n{data_all:+4.1}\n");

    // Store array subset
    let ndarray_subset: Array2<f32> = array![
        [-0.6],
        [-1.6],
        [-2.6],
        [-3.6],
        [-4.6],
        [-5.6],
        [-6.6],
        [-7.6],
    ];
    array.store_array_subset_ndarray(
        ArraySubset::new_with_ranges(&[0..8, 6..7]).start(),
        ndarray_subset,
    )?;
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("store_array_subset [0..8, 6..7]:\n{data_all:+4.1}\n");

    // Store chunk subset
    let ndarray_chunk_subset: Array2<f32> = array![[-7.4, -7.5, -7.6, -7.7],];
    array.store_chunk_subset_ndarray(
        // chunk indices
        &[1, 1],
        // subset within chunk
        ArraySubset::new_with_ranges(&[3..4, 0..4]).start(),
        ndarray_chunk_subset,
    )?;
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("store_chunk_subset [3..4, 0..4] of chunk [1, 1]:\n{data_all:+4.1}\n");

    // Erase a chunk
    array.erase_chunk(&[0, 0])?;
    let data_all = array.retrieve_array_subset_ndarray::<f32>(&subset_all)?;
    println!("erase_chunk [0, 0]:\n{data_all:+4.1}\n");

    // Read a chunk
    let chunk_indices = vec![0, 1];
    let data_chunk = array.retrieve_chunk_ndarray::<f32>(&chunk_indices)?;
    println!("retrieve_chunk [0, 1]:\n{data_chunk:+4.1}\n");

    // Read chunks
    let chunks = ArraySubset::new_with_ranges(&[0..2, 1..2]);
    let data_chunks = array.retrieve_chunks_ndarray::<f32>(&chunks)?;
    println!("retrieve_chunks [0..2, 1..2]:\n{data_chunks:+4.1}\n");

    // Retrieve an array subset
    let subset = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
    let data_subset = array.retrieve_array_subset_ndarray::<f32>(&subset)?;
    println!("retrieve_array_subset [2..6, 3..5]:\n{data_subset:+4.1}\n");

    // Show the hierarchy
    let node = Node::new(&*store, "/").unwrap();
    let tree = node.hierarchy_tree();
    println!("hierarchy_tree:\n{}", tree);

    Ok(())
}
examples/async_array_write_read.rs (line 85)
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
/// Asynchronous array write/read example.
///
/// Creates a group and an 8x8 `f32` array (4x4 regular chunks, NaN fill
/// value) in an in-memory `object_store`-backed zarrs store, writes chunks
/// and array subsets (both concurrently and sequentially), reads them back,
/// and finally prints the node hierarchy.
///
/// # Errors
/// Propagates any metadata, storage, or retrieval error from the zarrs API.
async fn async_array_write_read() -> Result<(), Box<dyn std::error::Error>> {
    use futures::{stream::FuturesUnordered, StreamExt};
    use std::sync::Arc;
    use zarrs::{
        array::{DataType, FillValue, ZARR_NAN_F32},
        array_subset::ArraySubset,
        node::Node,
        storage::store,
    };

    // Create a store
    // let path = tempfile::TempDir::new()?;
    // let mut store: ReadableWritableListableStorage = Arc::new(store::AsyncFilesystemStore::new(path.path())?);
    // let mut store: ReadableWritableListableStorage = Arc::new(store::AsyncFilesystemStore::new(
    //     "tests/data/array_write_read.zarr",
    // )?);
    // `mut` because the binding may be replaced below when usage logging is enabled.
    let mut store: AsyncReadableWritableListableStorage = Arc::new(store::AsyncObjectStore::new(
        object_store::memory::InMemory::new(),
    ));
    // Optionally wrap the store in a usage-logging storage transformer when
    // the program is invoked with `--usage-log` as its first argument.
    if let Some(arg1) = std::env::args().collect::<Vec<_>>().get(1) {
        if arg1 == "--usage-log" {
            let log_writer = Arc::new(std::sync::Mutex::new(
                // std::io::BufWriter::new(
                std::io::stdout(),
                //    )
            ));
            // Each log line is prefixed with a "[HH:MM:SS.mmm] " UTC timestamp.
            let usage_log = Arc::new(UsageLogStorageTransformer::new(log_writer, || {
                chrono::Utc::now().format("[%T%.3f] ").to_string()
            }));
            store = usage_log
                .clone()
                .create_async_readable_writable_listable_transformer(store);
        }
    }

    // Create a group
    let group_path = "/group";
    let mut group = zarrs::group::GroupBuilder::new().build(store.clone(), group_path)?;

    // Update group metadata
    group
        .attributes_mut()
        .insert("foo".into(), serde_json::Value::String("bar".into()));

    // Write group metadata to store
    group.async_store_metadata().await?;

    println!(
        "The group metadata is:\n{}\n",
        serde_json::to_string_pretty(&group.metadata())?
    );

    // Create an array
    let array_path = "/group/array";
    let array = zarrs::array::ArrayBuilder::new(
        vec![8, 8], // array shape
        DataType::Float32,
        vec![4, 4].try_into()?, // regular chunk shape
        FillValue::from(ZARR_NAN_F32),
    )
    // .bytes_to_bytes_codecs(vec![]) // uncompressed
    .dimension_names(["y", "x"].into())
    // .storage_transformers(vec![].into())
    .build(store.clone(), array_path)?;

    // Write array metadata to store
    array.async_store_metadata().await?;

    println!(
        "The array metadata is:\n{}\n",
        serde_json::to_string_pretty(&array.metadata())?
    );

    // Write some chunks
    // First resolve each chunk's array subset (so its element count is known),
    // failing fast if any chunk indices are invalid for the grid.
    let subsets = (0..2)
        .map(|i| {
            let chunk_indices: Vec<u64> = vec![0, i];
            array
                .chunk_grid()
                .subset(&chunk_indices, array.shape())?
                .ok_or_else(|| {
                    zarrs::array::ArrayError::InvalidChunkGridIndicesError(chunk_indices.to_vec())
                })
                .map(|chunk_subset| (i, chunk_indices, chunk_subset))
        })
        .collect::<Result<Vec<_>, _>>()?;
    // Issue the chunk writes concurrently; chunk [0, i] is filled with i * 0.1.
    let mut futures = subsets
        .iter()
        .map(|(i, chunk_indices, chunk_subset)| {
            array.async_store_chunk_elements(
                &chunk_indices,
                vec![*i as f32 * 0.1; chunk_subset.num_elements() as usize],
            )
        })
        .collect::<FuturesUnordered<_>>();
    // Drive every pending write to completion, propagating the first error.
    while let Some(item) = futures.next().await {
        item?;
    }

    let subset_all = ArraySubset::new_with_shape(array.shape().to_vec());
    let data_all = array
        .async_retrieve_array_subset_ndarray::<f32>(&subset_all)
        .await?;
    println!("async_store_chunk [0, 0] and [0, 1]:\n{data_all:+4.1}\n");

    // Store multiple chunks
    array
        .async_store_chunks_elements::<f32>(
            &ArraySubset::new_with_ranges(&[1..2, 0..2]),
            vec![
                //
                1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
                //
                1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0, 1.0, 1.1, 1.1, 1.1, 1.1,
            ],
        )
        .await?;
    let data_all = array
        .async_retrieve_array_subset_ndarray::<f32>(&subset_all)
        .await?;
    println!("async_store_chunks [1..2, 0..2]:\n{data_all:+4.1}\n");

    // Write a subset spanning multiple chunks, including updating chunks already written
    array
        .async_store_array_subset_elements::<f32>(
            &ArraySubset::new_with_ranges(&[3..6, 3..6]),
            vec![-3.3, -3.4, -3.5, -4.3, -4.4, -4.5, -5.3, -5.4, -5.5],
        )
        .await?;
    let data_all = array
        .async_retrieve_array_subset_ndarray::<f32>(&subset_all)
        .await?;
    println!("async_store_array_subset [3..6, 3..6]:\n{data_all:+4.1}\n");

    // Store array subset
    array
        .async_store_array_subset_elements::<f32>(
            &ArraySubset::new_with_ranges(&[0..8, 6..7]),
            vec![-0.6, -1.6, -2.6, -3.6, -4.6, -5.6, -6.6, -7.6],
        )
        .await?;
    let data_all = array
        .async_retrieve_array_subset_ndarray::<f32>(&subset_all)
        .await?;
    println!("async_store_array_subset [0..8, 6..7]:\n{data_all:+4.1}\n");

    // Store chunk subset
    array
        .async_store_chunk_subset_elements::<f32>(
            // chunk indices
            &[1, 1],
            // subset within chunk
            &ArraySubset::new_with_ranges(&[3..4, 0..4]),
            vec![-7.4, -7.5, -7.6, -7.7],
        )
        .await?;
    let data_all = array
        .async_retrieve_array_subset_ndarray::<f32>(&subset_all)
        .await?;
    println!("async_store_chunk_subset [3..4, 0..4] of chunk [1, 1]:\n{data_all:+4.1}\n");

    // Erase a chunk (reads of erased regions yield the fill value)
    array.async_erase_chunk(&[0, 0]).await?;
    let data_all = array
        .async_retrieve_array_subset_ndarray::<f32>(&subset_all)
        .await?;
    println!("async_erase_chunk [0, 0]:\n{data_all:+4.1}\n");

    // Read a chunk
    let chunk_indices = vec![0, 1];
    let data_chunk = array
        .async_retrieve_chunk_ndarray::<f32>(&chunk_indices)
        .await?;
    println!("async_retrieve_chunk [0, 1]:\n{data_chunk:+4.1}\n");

    // Read chunks
    let chunks = ArraySubset::new_with_ranges(&[0..2, 1..2]);
    let data_chunks = array.async_retrieve_chunks_ndarray::<f32>(&chunks).await?;
    println!("async_retrieve_chunks [0..2, 1..2]:\n{data_chunks:+4.1}\n");

    // Retrieve an array subset
    let subset = ArraySubset::new_with_ranges(&[2..6, 3..5]); // the center 4x2 region
    let data_subset = array
        .async_retrieve_array_subset_ndarray::<f32>(&subset)
        .await?;
    println!("async_retrieve_array_subset [2..6, 3..5]:\n{data_subset:+4.1}\n");

    // Show the hierarchy
    let node = Node::async_new(&*store, "/").await.unwrap();
    let tree = node.hierarchy_tree();
    println!("hierarchy_tree:\n{}", tree);

    Ok(())
}
source

fn chunks_subset( &self, chunks: &ArraySubset, array_shape: &[u64] ) -> Result<Option<ArraySubset>, IncompatibleDimensionalityError>

The ArraySubset of the array spanned by the chunks in chunks.

Returns None if the chunk subset cannot be determined.

§Errors

Returns IncompatibleDimensionalityError if chunks or array_shape do not match the dimensionality of the chunk grid.

source

fn chunk_indices( &self, array_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ArrayIndices>, IncompatibleDimensionalityError>

The indices of the chunk that contains the element at array_indices.

Returns None if the chunk indices cannot be determined.

§Errors

Returns IncompatibleDimensionalityError if array_indices or array_shape do not match the dimensionality of the chunk grid.

source

fn chunk_element_indices( &self, array_indices: &[u64], array_shape: &[u64] ) -> Result<Option<ArrayIndices>, IncompatibleDimensionalityError>

The indices within the chunk of the element at array_indices.

Returns None if the chunk element indices cannot be determined.

§Errors

Returns IncompatibleDimensionalityError if array_indices or array_shape do not match the dimensionality of the chunk grid.

source

fn array_indices_inbounds( &self, array_indices: &[u64], array_shape: &[u64] ) -> bool

Check if array indices are in-bounds.

Ensures array indices are within the array shape. Zero sized array dimensions are considered “unlimited” and always in-bounds.

source

fn chunk_indices_inbounds( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> bool

Check if chunk indices are in-bounds.

Ensures chunk grid indices are within the chunk grid shape. Zero sized array dimensions are considered “unlimited” and always in-bounds.

source

unsafe fn subset_unchecked( &self, chunk_indices: &[u64], array_shape: &[u64] ) -> Option<ArraySubset>

See ChunkGridTraits::subset.

§Safety

The length of chunk_indices must match the dimensionality of the chunk grid.

Implementors§