1mod types;
5
6use crate::blocks::{Block, FullTipset, GossipBlock};
7use crate::libp2p::{IdentTopic, NetworkMessage, PUBSUB_BLOCK_STR};
8use crate::rpc::{ApiPaths, Ctx, Permission, RpcMethod, ServerError};
9use anyhow::{Context as _, anyhow};
10use cid::Cid;
11use enumflags2::BitFlags;
12use fvm_ipld_blockstore::Blockstore;
13use fvm_ipld_encoding::to_vec;
14pub use types::*;
15
16use crate::chain;
17use crate::chain_sync::{NodeSyncStatus, SyncStatusReport, TipsetValidator};
18
19pub enum SyncCheckBad {}
20impl RpcMethod<1> for SyncCheckBad {
21 const NAME: &'static str = "Filecoin.SyncCheckBad";
22 const PARAM_NAMES: [&'static str; 1] = ["cid"];
23 const API_PATHS: BitFlags<ApiPaths> = ApiPaths::all();
24 const PERMISSION: Permission = Permission::Read;
25
26 type Params = (Cid,);
27 type Ok = String;
28
29 async fn handle(
30 ctx: Ctx<impl Blockstore>,
31 (cid,): Self::Params,
32 ) -> Result<Self::Ok, ServerError> {
33 Ok(ctx
34 .bad_blocks
35 .as_ref()
36 .context("bad block cache is disabled")?
37 .peek(&cid)
38 .map(|_| "bad".to_string())
39 .unwrap_or_default())
40 }
41}
42
43pub enum SyncMarkBad {}
44impl RpcMethod<1> for SyncMarkBad {
45 const NAME: &'static str = "Filecoin.SyncMarkBad";
46 const PARAM_NAMES: [&'static str; 1] = ["cid"];
47 const API_PATHS: BitFlags<ApiPaths> = ApiPaths::all();
48 const PERMISSION: Permission = Permission::Admin;
49
50 type Params = (Cid,);
51 type Ok = ();
52
53 async fn handle(
54 ctx: Ctx<impl Blockstore>,
55 (cid,): Self::Params,
56 ) -> Result<Self::Ok, ServerError> {
57 ctx.bad_blocks
58 .as_ref()
59 .context("bad block cache is disabled")?
60 .push(cid);
61 Ok(())
62 }
63}
64
65pub enum SyncSnapshotProgress {}
66impl RpcMethod<0> for SyncSnapshotProgress {
67 const NAME: &'static str = "Forest.SyncSnapshotProgress";
68 const PARAM_NAMES: [&'static str; 0] = [];
69 const API_PATHS: BitFlags<ApiPaths> = ApiPaths::all();
70 const PERMISSION: Permission = Permission::Read;
71 const DESCRIPTION: Option<&'static str> =
72 Some("Returns the snapshot download progress. Return Null if the tracking isn't started");
73
74 type Params = ();
75 type Ok = SnapshotProgressState;
76
77 async fn handle(ctx: Ctx<impl Blockstore>, (): Self::Params) -> Result<Self::Ok, ServerError> {
78 Ok(ctx.get_snapshot_progress_tracker())
79 }
80}
81
82pub enum SyncStatus {}
83impl RpcMethod<0> for SyncStatus {
84 const NAME: &'static str = "Forest.SyncStatus";
85 const PARAM_NAMES: [&'static str; 0] = [];
86 const API_PATHS: BitFlags<ApiPaths> = ApiPaths::all();
87 const PERMISSION: Permission = Permission::Read;
88 const DESCRIPTION: Option<&'static str> = Some("Returns the current sync status of the node.");
89
90 type Params = ();
91 type Ok = SyncStatusReport;
92
93 async fn handle(ctx: Ctx<impl Blockstore>, (): Self::Params) -> Result<Self::Ok, ServerError> {
94 let sync_status = ctx.sync_status.as_ref().read().clone();
95 Ok(sync_status)
96 }
97}
98
99pub enum SyncSubmitBlock {}
100impl RpcMethod<1> for SyncSubmitBlock {
101 const NAME: &'static str = "Filecoin.SyncSubmitBlock";
102 const PARAM_NAMES: [&'static str; 1] = ["block"];
103 const API_PATHS: BitFlags<ApiPaths> = ApiPaths::all();
104 const PERMISSION: Permission = Permission::Write;
105 const DESCRIPTION: Option<&'static str> = Some("Submits a newly created block to the network.");
106
107 type Params = (GossipBlock,);
108 type Ok = ();
109
110 async fn handle(
113 ctx: Ctx<impl Blockstore>,
114 (block_msg,): Self::Params,
115 ) -> Result<Self::Ok, ServerError> {
116 if !matches!(ctx.sync_status.read().status, NodeSyncStatus::Synced) {
117 Err(anyhow!("the node isn't in 'follow' mode"))?
118 }
119 let genesis_network_name = ctx.chain_config().network.genesis_name();
120 let encoded_message = to_vec(&block_msg)?;
121 let pubsub_block_str = format!("{PUBSUB_BLOCK_STR}/{genesis_network_name}");
122 let (bls_messages, secp_messages) =
123 chain::store::block_messages(ctx.store(), &block_msg.header)?;
124 let block = Block {
125 header: block_msg.header.clone(),
126 bls_messages,
127 secp_messages,
128 };
129 let ts = FullTipset::from(block);
130 let genesis_ts = ctx.chain_store().genesis_tipset();
131
132 TipsetValidator(&ts)
133 .validate(
134 ctx.chain_store(),
135 ctx.bad_blocks.as_ref().map(AsRef::as_ref),
136 &genesis_ts,
137 ctx.chain_config().block_delay_secs,
138 )
139 .context("failed to validate the tipset")?;
140
141 ctx.tipset_send
142 .try_send(ts)
143 .context("tipset queue is full")?;
144
145 ctx.network_send().send(NetworkMessage::PubsubMessage {
146 topic: IdentTopic::new(pubsub_block_str),
147 message: encoded_message,
148 })?;
149 Ok(())
150 }
151}
152
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use super::*;
    use crate::blocks::RawBlockHeader;
    use crate::blocks::{CachingBlockHeader, Tipset};
    use crate::chain::ChainStore;
    use crate::chain_sync::network_context::SyncNetworkContext;
    use crate::db::MemoryDB;
    use crate::key_management::{KeyStore, KeyStoreConfig};
    use crate::libp2p::{NetworkMessage, PeerManager};
    use crate::message_pool::{MessagePool, MpoolRpcProvider};
    use crate::networks::ChainConfig;
    use crate::rpc::RPCState;
    use crate::rpc::eth::filter::EthEventHandler;
    use crate::shim::address::Address;
    use crate::state_manager::StateManager;
    use crate::utils::encoding::from_slice_with_fallback;
    use parking_lot::RwLock;
    use tokio::sync::mpsc;
    use tokio::task::JoinSet;

    /// Builds a minimal, fully in-memory `RPCState` for exercising the sync
    /// RPC handlers, plus the receiving half of the network channel so tests
    /// could observe outgoing `NetworkMessage`s.
    fn ctx() -> (Arc<RPCState<MemoryDB>>, flume::Receiver<NetworkMessage>) {
        // Bounded channels with small capacity; the tests here never fill them.
        let (network_send, network_rx) = flume::bounded(5);
        let (tipset_send, _) = flume::bounded(5);
        let mut services = JoinSet::new();
        let db = Arc::new(MemoryDB::default());
        let chain_config = Arc::new(ChainConfig::default());

        // Synthetic genesis header; only the timestamp/miner fields matter
        // enough to be set explicitly here.
        let genesis_header = CachingBlockHeader::new(RawBlockHeader {
            miner_address: Address::new_id(0),
            timestamp: 7777,
            ..Default::default()
        });

        let cs_arc = Arc::new(
            ChainStore::new(db.clone(), db.clone(), db, chain_config, genesis_header).unwrap(),
        );

        let state_manager = Arc::new(StateManager::new(cs_arc.clone()).unwrap());
        let state_manager_for_thread = state_manager.clone();
        let cs_for_test = &cs_arc;
        let mpool_network_send = network_send.clone();
        let pool = {
            // Pre-serialized block header fixture (hex-encoded CBOR). Decoded
            // below to seed the chain store with a heaviest tipset.
            let bz = hex::decode("904300e80781586082cb7477a801f55c1f2ea5e5d1167661feea60a39f697e1099af132682b81cc5047beacf5b6e80d5f52b9fd90323fb8510a5396416dd076c13c85619e176558582744053a3faef6764829aa02132a1571a76aabdc498a638ea0054d3bb57f41d82015860812d2396cc4592cdf7f829374b01ffd03c5469a4b0a9acc5ccc642797aa0a5498b97b28d90820fedc6f79ff0a6005f5c15dbaca3b8a45720af7ed53000555667207a0ccb50073cd24510995abd4c4e45c1e9e114905018b2da9454190499941e818201582012dd0a6a7d0e222a97926da03adb5a7768d31cc7c5c2bd6828e14a7d25fa3a608182004b76616c69642070726f6f6681d82a5827000171a0e4022030f89a8b0373ad69079dbcbc5addfe9b34dce932189786e50d3eb432ede3ba9c43000f0001d82a5827000171a0e4022052238c7d15c100c1b9ebf849541810c9e3c2d86e826512c6c416d2318fcd496dd82a5827000171a0e40220e5658b3d18cd06e1db9015b4b0ec55c123a24d5be1ea24d83938c5b8397b4f2fd82a5827000171a0e4022018d351341c302a21786b585708c9873565a0d07c42521d4aaf52da3ff6f2e461586102c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001a5f2c5439586102b5cd48724dce0fec8799d77fd6c5113276e7f470c8391faa0b5a6033a3eaf357d635705c36abe10309d73592727289680515afd9d424793ba4796b052682d21b03c5c8a37d94827fecc59cdc5750e198fdf20dee012f4d627c6665132298ab95004500053724e0").unwrap();
            let header = from_slice_with_fallback::<CachingBlockHeader>(&bz).unwrap();
            let ts = Tipset::from(header);
            let db = cs_for_test.blockstore();
            let tsk = ts.key();
            cs_for_test.set_heaviest_tipset(ts.clone()).unwrap();

            // Store the raw header bytes under each CID in the tipset key so
            // later lookups by CID succeed.
            for i in tsk.to_cids() {
                let bz2 = bz.clone();
                db.put_keyed(&i, &bz2).unwrap();
            }

            let provider =
                MpoolRpcProvider::new(cs_arc.publisher().clone(), state_manager_for_thread.clone());
            MessagePool::new(
                provider,
                mpool_network_send,
                Default::default(),
                state_manager_for_thread.chain_config().clone(),
                &mut services,
            )
            .unwrap()
        };
        let start_time = chrono::Utc::now();

        let peer_manager = Arc::new(PeerManager::default());
        let sync_network_context =
            SyncNetworkContext::new(network_send, peer_manager, state_manager.blockstore_owned());
        // Assemble the RPC state; `bad_blocks: Some(..)` enables the cache
        // so SyncCheckBad/SyncMarkBad don't error out in the tests.
        let state = Arc::new(RPCState {
            state_manager,
            keystore: Arc::new(RwLock::new(KeyStore::new(KeyStoreConfig::Memory).unwrap())),
            mpool: Arc::new(pool),
            bad_blocks: Some(Default::default()),
            msgs_in_tipset: Default::default(),
            sync_status: Arc::new(RwLock::new(SyncStatusReport::default())),
            eth_event_handler: Arc::new(EthEventHandler::new()),
            sync_network_context,
            start_time,
            shutdown: mpsc::channel(1).0, tipset_send,
            snapshot_progress_tracker: Default::default(),
        });
        (state, network_rx)
    }

    /// A CID starts unmarked, gets marked bad via SyncMarkBad, and is then
    /// reported as "bad" by SyncCheckBad.
    #[tokio::test]
    async fn set_check_bad() {
        let (ctx, _) = ctx();

        let cid = "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
            .parse::<Cid>()
            .unwrap();

        // Unmarked CID: empty reason string.
        let reason = SyncCheckBad::handle(ctx.clone(), (cid,)).await.unwrap();
        assert_eq!(reason, "");

        SyncMarkBad::handle(ctx.clone(), (cid,)).await.unwrap();

        // After marking, the handler reports "bad".
        let reason = SyncCheckBad::handle(ctx.clone(), (cid,)).await.unwrap();
        assert_eq!(reason, "bad");
    }

    /// SyncStatus returns a snapshot matching the shared report, including
    /// after the report is mutated.
    #[tokio::test]
    async fn sync_status_test() {
        let (ctx, _) = ctx();

        let st_copy = ctx.sync_status.clone();

        let sync_status = SyncStatus::handle(ctx.clone(), ()).await.unwrap();
        assert_eq!(sync_status, st_copy.as_ref().read().clone());

        // Mutate the shared report and confirm the handler reflects it.
        st_copy.write().status = NodeSyncStatus::Syncing;
        st_copy.write().current_head_epoch = 4;

        let sync_status = SyncStatus::handle(ctx.clone(), ()).await.unwrap();

        assert_eq!(sync_status, st_copy.as_ref().read().clone());
    }
}
278}