use crate::hash::CryptoHash;
use crate::merkle::MerklePath;
use crate::sharding::{
    ReceiptProof, ShardChunk, ShardChunkHeader, ShardChunkHeaderV1, ShardChunkV1,
};
use crate::state_part::{StatePart, StatePartV0};
use crate::types::{BlockHeight, EpochId, ShardId, StateRoot, StateRootNode};
use borsh::{BorshDeserialize, BorshSerialize};
use near_primitives_core::types::{EpochHeight, ProtocolVersion};
use near_primitives_core::version::ProtocolFeature;
use near_schema_checker_lib::ProtocolSchema;
use std::sync::Arc;

#[derive(PartialEq, Eq, Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct ReceiptProofResponse(pub CryptoHash, pub Arc<Vec<ReceiptProof>>);

#[derive(PartialEq, Eq, Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct RootProof(pub CryptoHash, pub MerklePath);

#[derive(PartialEq, Eq, Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct StateHeaderKey(pub ShardId, pub CryptoHash);

#[derive(PartialEq, Eq, Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct StatePartKey(pub CryptoHash, pub ShardId, pub u64);

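/// Whether a state sync request refers to a single state part or to the shard state header.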
#[derive(
    Copy, PartialEq, Eq, Clone, Debug, Hash, BorshSerialize, BorshDeserialize, ProtocolSchema,
)]
pub enum PartIdOrHeader {
    Part { part_id: u64 },
    Header,
}

impl From<PartIdOrHeader> for &'static str {
    fn from(value: PartIdOrHeader) -> Self {
        match value {
            PartIdOrHeader::Part { .. } => "part",
            PartIdOrHeader::Header => "header",
        }
    }
}

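/// Body of a [`StateRequestAck`]: the node will respond, is busy, or hit an error.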
#[derive(Copy, PartialEq, Eq, Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub enum StateRequestAckBody {
    WillRespond,
    Busy,
    Error,
}

impl From<StateRequestAckBody> for &'static str {
    fn from(value: StateRequestAckBody) -> Self {
        match value {
            StateRequestAckBody::WillRespond => "will_respond",
            StateRequestAckBody::Busy => "busy",
            StateRequestAckBody::Error => "error",
        }
    }
}

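/// Acknowledgment of a state part or header request, indicating how the node will handle it.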
#[derive(PartialEq, Eq, Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct StateRequestAck {
    pub shard_id: ShardId,
    pub sync_hash: CryptoHash,
    pub part_id_or_header: PartIdOrHeader,
    pub body: StateRequestAckBody,
}

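/// Legacy state sync header built from the V1 chunk types.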
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct ShardStateSyncResponseHeaderV1 {
    pub chunk: ShardChunkV1,
    pub chunk_proof: MerklePath,
    pub prev_chunk_header: Option<ShardChunkHeaderV1>,
    pub prev_chunk_proof: Option<MerklePath>,
    pub incoming_receipts_proofs: Vec<ReceiptProofResponse>,
    pub root_proofs: Vec<Vec<RootProof>>,
    pub state_root_node: StateRootNode,
}

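/// State sync header built from the versioned chunk types; used by the V2 and later responses.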
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct ShardStateSyncResponseHeaderV2 {
    pub chunk: ShardChunk,
    pub chunk_proof: MerklePath,
    pub prev_chunk_header: Option<ShardChunkHeader>,
    pub prev_chunk_proof: Option<MerklePath>,
    pub incoming_receipts_proofs: Vec<ReceiptProofResponse>,
    pub root_proofs: Vec<Vec<RootProof>>,
    pub state_root_node: StateRootNode,
}

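/// Which state parts are already cached: all, none, or the subset encoded in a [`BitArray`].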
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
#[borsh(use_discriminant = true)]
#[repr(u8)]
pub enum CachedParts {
    AllParts = 0,
    NoParts = 1,
    BitArray(BitArray) = 2,
}

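/// Fixed-capacity bit vector backed by a byte buffer.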
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct BitArray {
    data: Vec<u8>,
    capacity: u64,
}

impl BitArray {
    pub fn new(capacity: u64) -> Self {
        let num_bytes = capacity.div_ceil(8);
        Self { data: vec![0; num_bytes as usize], capacity }
    }
}

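/// Versioned state sync header for a shard.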
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
#[borsh(use_discriminant = true)]
#[repr(u8)]
#[allow(clippy::large_enum_variant)]
pub enum ShardStateSyncResponseHeader {
    V1(ShardStateSyncResponseHeaderV1) = 0,
    V2(ShardStateSyncResponseHeaderV2) = 1,
}

impl ShardStateSyncResponseHeader {
    #[inline]
    pub fn take_chunk(self) -> ShardChunk {
        match self {
            Self::V1(header) => ShardChunk::V1(header.chunk),
            Self::V2(header) => header.chunk,
        }
    }

    #[inline]
    pub fn cloned_chunk(&self) -> ShardChunk {
        match self {
            Self::V1(header) => ShardChunk::V1(header.chunk.clone()),
            Self::V2(header) => header.chunk.clone(),
        }
    }

    #[inline]
    pub fn cloned_prev_chunk_header(&self) -> Option<ShardChunkHeader> {
        match self {
            Self::V1(header) => header.prev_chunk_header.clone().map(ShardChunkHeader::V1),
            Self::V2(header) => header.prev_chunk_header.clone(),
        }
    }

    #[inline]
    pub fn chunk_height_included(&self) -> BlockHeight {
        match self {
            Self::V1(header) => header.chunk.header.height_included,
            Self::V2(header) => header.chunk.height_included(),
        }
    }

    #[inline]
    pub fn chunk_prev_state_root(&self) -> StateRoot {
        match self {
            Self::V1(header) => header.chunk.header.inner.prev_state_root,
            Self::V2(header) => header.chunk.prev_state_root(),
        }
    }

    #[inline]
    pub fn chunk_proof(&self) -> &MerklePath {
        match self {
            Self::V1(header) => &header.chunk_proof,
            Self::V2(header) => &header.chunk_proof,
        }
    }

    #[inline]
    pub fn prev_chunk_proof(&self) -> &Option<MerklePath> {
        match self {
            Self::V1(header) => &header.prev_chunk_proof,
            Self::V2(header) => &header.prev_chunk_proof,
        }
    }

    #[inline]
    pub fn incoming_receipts_proofs(&self) -> &[ReceiptProofResponse] {
        match self {
            Self::V1(header) => &header.incoming_receipts_proofs,
            Self::V2(header) => &header.incoming_receipts_proofs,
        }
    }

    #[inline]
    pub fn root_proofs(&self) -> &[Vec<RootProof>] {
        match self {
            Self::V1(header) => &header.root_proofs,
            Self::V2(header) => &header.root_proofs,
        }
    }

    #[inline]
    pub fn state_root_node(&self) -> &StateRootNode {
        match self {
            Self::V1(header) => &header.state_root_node,
            Self::V2(header) => &header.state_root_node,
        }
    }

    pub fn num_state_parts(&self) -> u64 {
        get_num_state_parts(self.state_root_node().memory_usage)
    }
}

#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct ShardStateSyncResponseV1 {
    pub header: Option<ShardStateSyncResponseHeaderV1>,
    pub part: Option<(u64, Vec<u8>)>,
}

impl ShardStateSyncResponseV1 {
    pub fn part_id(&self) -> Option<u64> {
        self.part.as_ref().map(|(part_id, _)| *part_id)
    }

    pub fn payload_length(&self) -> Option<usize> {
        self.part.as_ref().map(|(_, part)| part.len())
    }
}

#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct ShardStateSyncResponseV2 {
    pub header: Option<ShardStateSyncResponseHeaderV2>,
    pub part: Option<(u64, Vec<u8>)>,
}

#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct ShardStateSyncResponseV3 {
    pub header: Option<ShardStateSyncResponseHeaderV2>,
    pub part: Option<(u64, Vec<u8>)>,
    pub cached_parts: Option<CachedParts>,
    pub can_generate: bool,
}

#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
pub struct ShardStateSyncResponseV4 {
    pub header: Option<ShardStateSyncResponseHeaderV2>,
    pub part: Option<(u64, StatePart)>,
}

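/// Versioned response to a state sync request, carrying an optional header and/or state part.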
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
#[borsh(use_discriminant = true)]
#[repr(u8)]
pub enum ShardStateSyncResponse {
    V1(ShardStateSyncResponseV1) = 0,
    V2(ShardStateSyncResponseV2) = 1,
    V3(ShardStateSyncResponseV3) = 2,
    V4(ShardStateSyncResponseV4) = 3,
}

impl ShardStateSyncResponse {
    pub fn new_from_header(
        header: Option<ShardStateSyncResponseHeaderV2>,
        protocol_version: ProtocolVersion,
    ) -> Self {
        Self::new_from_header_or_part(header, None, protocol_version)
    }

    pub fn new_from_part(
        part: Option<(u64, StatePart)>,
        protocol_version: ProtocolVersion,
    ) -> Self {
        Self::new_from_header_or_part(None, part, protocol_version)
    }

    fn new_from_header_or_part(
        header: Option<ShardStateSyncResponseHeaderV2>,
        part: Option<(u64, StatePart)>,
        protocol_version: ProtocolVersion,
    ) -> Self {
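        // With StatePartsCompression enabled, respond with V4, which carries `StatePart` values
        // directly; otherwise fall back to V3, which only holds the raw `Vec<u8>` payload of a
        // `StatePart::V0`.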
        if ProtocolFeature::StatePartsCompression.enabled(protocol_version) {
            return Self::V4(ShardStateSyncResponseV4 { header, part });
        }
        let part = match part {
            None => None,
            Some((part_id, StatePart::V0(part))) => Some((part_id, part.0)),
            _ => panic!("StatePartsCompression not supported and part={part:?}"),
        };
        Self::V3(ShardStateSyncResponseV3 { header, part, cached_parts: None, can_generate: false })
    }

    pub fn take_header(self) -> Option<ShardStateSyncResponseHeader> {
        match self {
            Self::V1(response) => response.header.map(ShardStateSyncResponseHeader::V1),
            Self::V2(response) => response.header.map(ShardStateSyncResponseHeader::V2),
            Self::V3(response) => response.header.map(ShardStateSyncResponseHeader::V2),
            Self::V4(response) => response.header.map(ShardStateSyncResponseHeader::V2),
        }
    }

    pub fn part_id(&self) -> Option<u64> {
        match self {
            Self::V1(response) => response.part.as_ref().map(|(part_id, _)| *part_id),
            Self::V2(response) => response.part.as_ref().map(|(part_id, _)| *part_id),
            Self::V3(response) => response.part.as_ref().map(|(part_id, _)| *part_id),
            Self::V4(response) => response.part.as_ref().map(|(part_id, _)| *part_id),
        }
    }

    pub fn take_part(self) -> Option<(u64, StatePart)> {
        match self {
            Self::V1(response) => {
                response.part.map(|(idx, part)| (idx, StatePart::V0(StatePartV0(part))))
            }
            Self::V2(response) => {
                response.part.map(|(idx, part)| (idx, StatePart::V0(StatePartV0(part))))
            }
            Self::V3(response) => {
                response.part.map(|(idx, part)| (idx, StatePart::V0(StatePartV0(part))))
            }
            Self::V4(response) => response.part,
        }
    }

    pub fn payload_length(&self) -> Option<usize> {
        match self {
            Self::V1(response) => response.part.as_ref().map(|(_, part)| part.len()),
            Self::V2(response) => response.part.as_ref().map(|(_, part)| part.len()),
            Self::V3(response) => response.part.as_ref().map(|(_, part)| part.len()),
            Self::V4(response) => response.part.as_ref().map(|(_, part)| part.payload_length()),
        }
    }
}

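/// Maximum amount of state memory usage covered by a single state part (30 MiB).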
pub const STATE_PART_MEMORY_LIMIT: bytesize::ByteSize = bytesize::ByteSize(30 * bytesize::MIB);

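/// Number of state parts needed to cover `memory_usage` bytes of state,
/// i.e. `memory_usage / STATE_PART_MEMORY_LIMIT` rounded up.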
pub fn get_num_state_parts(memory_usage: u64) -> u64 {
    memory_usage.div_ceil(STATE_PART_MEMORY_LIMIT.as_u64())
}

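/// Progress of dumping a shard's state for an epoch: fully dumped, skipped, or in progress.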
#[derive(BorshSerialize, BorshDeserialize, Debug, Clone, serde::Serialize, ProtocolSchema)]
#[borsh(use_discriminant = true)]
#[repr(u8)]
pub enum StateSyncDumpProgress {
    AllDumped {
        epoch_id: EpochId,
        epoch_height: EpochHeight,
    } = 0,
    Skipped { epoch_id: EpochId, epoch_height: EpochHeight } = 1,
    InProgress {
        epoch_id: EpochId,
        epoch_height: EpochHeight,
        sync_hash: CryptoHash,
    } = 2,
}

#[cfg(test)]
mod tests {
    use crate::state_sync::{STATE_PART_MEMORY_LIMIT, get_num_state_parts};

    #[test]
    fn test_get_num_state_parts() {
        assert_eq!(get_num_state_parts(0), 0);
        assert_eq!(get_num_state_parts(1), 1);
        assert_eq!(get_num_state_parts(STATE_PART_MEMORY_LIMIT.as_u64()), 1);
        assert_eq!(get_num_state_parts(STATE_PART_MEMORY_LIMIT.as_u64() + 1), 2);
        assert_eq!(get_num_state_parts(STATE_PART_MEMORY_LIMIT.as_u64() * 100), 100);
        assert_eq!(get_num_state_parts(STATE_PART_MEMORY_LIMIT.as_u64() * 100 + 1), 101);
    }
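
    // Illustrative check for `BitArray::new`: it allocates one byte per 8 bits of capacity,
    // rounded up, and records the requested capacity.
    #[test]
    fn test_bit_array_new() {
        let bits = super::BitArray::new(9);
        assert_eq!(bits.data.len(), 2);
        assert_eq!(bits.capacity, 9);
    }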
}