Struct bitcoin_connman::Connman
pub struct Connman {
pub cs_total_bytes_recv: Amo<ConnmanTotalBytesRecv>,
pub cs_total_bytes_sent: Amo<ConnmanTotalBytesSent>,
pub peer_connect_timeout: Amo<Duration>,
pub whitelisted_range: Amo<Vec<NetWhitelistPermissions>>,
pub n_send_buffer_max_size: AtomicU32,
pub n_receive_flood_size: AtomicU32,
pub vh_listen_socket: Amo<Vec<ConnmanListenSocket>>,
pub network_active: AtomicBool,
pub addresses_initialized: AtomicBool,
pub addrman: Amo<AddrMan>,
pub addr_fetches_mutex: Amo<ConnmanAddrFetches>,
pub cs_v_added_nodes: Amo<ConnmanAddedNodes>,
pub nodes_disconnected: Amo<Vec<Amo<Box<dyn NodeInterface>>>>,
pub cs_v_nodes: Amo<ConnmanNodes>,
pub n_last_node_id: Atomic<NodeId>,
pub n_prev_node_count: AtomicU32,
pub addr_response_caches: HashMap<u64, ConnmanCachedAddrResponse>,
pub n_local_services: Amo<ServiceFlags>,
pub sem_outbound: Amo<Semaphore>,
pub sem_addnode: Amo<Semaphore>,
pub n_max_connections: AtomicI32,
pub max_outbound_full_relay: AtomicI32,
pub max_outbound_block_relay: AtomicI32,
pub n_max_addnode: AtomicI32,
pub n_max_feeler: AtomicI32,
pub max_outbound: AtomicI32,
pub use_addrman_outgoing: AtomicBool,
pub client_interface: Amo<ClientUIInterface>,
pub msgproc: Amo<Box<dyn NetEventsInterface>>,
pub banman: Amo<BanMan>,
pub anchors: Amo<Vec<Address>>,
pub n_seed0: u64,
pub n_seed1: u64,
pub cond_msg_proc: Condvar,
pub mutex_msg_proc: Amo<ConnmanMsgProc>,
pub flag_interrupt_msg_proc: AtomicBool,
pub interrupt_net: Amo<ThreadInterrupt>,
pub i2p_sam_session: Amo<Box<SAMSession>>,
pub thread_dns_address_seed: Amo<JoinHandle<()>>,
pub thread_socket_handler: Amo<JoinHandle<()>>,
pub thread_open_added_connections: Amo<JoinHandle<()>>,
pub thread_open_connections: Amo<JoinHandle<()>>,
pub thread_message_handler: Amo<JoinHandle<()>>,
pub thread_i2p_accept_incoming: Amo<JoinHandle<()>>,
pub try_another_outbound_peer: AtomicBool,
pub start_extra_block_relay_peers: AtomicBool,
pub next_send_inv_to_incoming: Atomic<OffsetDateTime>,
pub onion_binds: Amo<Vec<Service>>,
}
Fields§
§cs_total_bytes_recv: Amo<ConnmanTotalBytesRecv>
| Network usage totals |
cs_total_bytes_sent: Amo<ConnmanTotalBytesSent>
§peer_connect_timeout: Amo<Duration>
| P2P timeout in seconds |
whitelisted_range: Amo<Vec<NetWhitelistPermissions>>
| Whitelisted ranges. Any node connecting | from these is automatically whitelisted | (as well as those connecting to whitelisted | binds). |
n_send_buffer_max_size: AtomicU32
§n_receive_flood_size: AtomicU32
§vh_listen_socket: Amo<Vec<ConnmanListenSocket>>
§network_active: AtomicBool
§addresses_initialized: AtomicBool
§addrman: Amo<AddrMan>
§addr_fetches_mutex: Amo<ConnmanAddrFetches>
§cs_v_added_nodes: Amo<ConnmanAddedNodes>
§nodes_disconnected: Amo<Vec<Amo<Box<dyn NodeInterface>>>>
§cs_v_nodes: Amo<ConnmanNodes>
§n_last_node_id: Atomic<NodeId>
§n_prev_node_count: AtomicU32
§addr_response_caches: HashMap<u64, ConnmanCachedAddrResponse>
| Addr responses stored in different | caches per (network, local socket) | prevent cross-network node identification. | If a node for example is multi-homed | under Tor and IPv6, a single cache (or | no cache at all) would let an attacker | easily detect that it is the same node | by comparing responses. Indexing by | local socket prevents leakage when | a node has multiple listening addresses | on the same network. | | The memory used equals 1000 CAddress | records (or around 40 bytes) per distinct | Network (up to 5) we have/had an inbound | peer from, resulting in at most ~196 | KB. Every separate local socket may | add up to ~196 KB extra. |
n_local_services: Amo<ServiceFlags>
| Services this instance offers. | | This data is replicated in each Node | instance we create during peer connection | (in ConnectNode()) under a member also | called nLocalServices. | | This data is not marked const, but after | being set it should not change. See the | note in Node::nLocalServices documentation. | \sa Node::nLocalServices |
sem_outbound: Amo<Semaphore>
§sem_addnode: Amo<Semaphore>
§n_max_connections: AtomicI32
§max_outbound_full_relay: AtomicI32
| How many full-relay (tx, block, addr) | outbound peers we want |
max_outbound_block_relay: AtomicI32
| How many block-relay only outbound | peers we want | | We do not relay tx or addr messages with | these peers |
n_max_addnode: AtomicI32
§n_max_feeler: AtomicI32
§max_outbound: AtomicI32
§use_addrman_outgoing: AtomicBool
§client_interface: Amo<ClientUIInterface>
§msgproc: Amo<Box<dyn NetEventsInterface>>
§banman: Amo<BanMan>
| Pointer to this node’s banman. May be | nullptr - check existence before dereferencing. |
anchors: Amo<Vec<Address>>
| Addresses that were saved during the | previous clean shutdown. We’ll attempt | to make block-relay-only connections | to them. |
n_seed0: u64
| SipHasher seeds for deterministic | randomness |
n_seed1: u64
| SipHasher seeds for deterministic | randomness |
cond_msg_proc: Condvar
§mutex_msg_proc: Amo<ConnmanMsgProc>
§flag_interrupt_msg_proc: AtomicBool
§interrupt_net: Amo<ThreadInterrupt>
| This is signaled when network activity | should cease. | | A pointer to it is saved in m_i2p_sam_session, | so make sure that the lifetime of interrupt_net | is not shorter than the lifetime of m_i2p_sam_session. |
i2p_sam_session: Amo<Box<SAMSession>>
| I2P SAM session. | | Used to accept incoming and make outgoing | I2P connections. |
thread_dns_address_seed: Amo<JoinHandle<()>>
§thread_socket_handler: Amo<JoinHandle<()>>
§thread_open_added_connections: Amo<JoinHandle<()>>
§thread_open_connections: Amo<JoinHandle<()>>
§thread_message_handler: Amo<JoinHandle<()>>
§thread_i2p_accept_incoming: Amo<JoinHandle<()>>
§try_another_outbound_peer: AtomicBool
| flag for deciding to connect to an extra | outbound peer, in excess of m_max_outbound_full_relay | | This takes the place of a feeler connection |
start_extra_block_relay_peers: AtomicBool
| flag for initiating extra block-relay-only | peer connections. this should only | be enabled after initial chain sync | has occurred, as these connections | are intended to be short-lived and low-bandwidth. |
next_send_inv_to_incoming: Atomic<OffsetDateTime>
§onion_binds: Amo<Vec<Service>>
| A vector of -bind=&lt;addr&gt;:&lt;port&gt;=onion | listening addresses. |
Implementations§
source§impl Connman
impl Connman
pub fn accept_connection(&self, h_listen_socket: &ConnmanListenSocket)
source§impl Connman
impl Connman
sourcepub fn add_connection(
&mut self,
address: &String,
conn_type: ConnectionType
) -> bool
pub fn add_connection( &mut self, address: &String, conn_type: ConnectionType ) -> bool
| Attempts to open a connection. Currently | only used from tests. | | ———– | @param[in] address | | Address of node to try connecting to | ––––– | @param[in] conn_type | | ConnectionType::OUTBOUND or ConnectionType::BLOCK_RELAY | or ConnectionType::ADDR_FETCH | | ———– | @return | | bool Returns false if there are no available | slots for this connection: | | - conn_type not a supported ConnectionType | | - Max total outbound connection capacity | filled | | - Max connection capacity for type is | filled |
source§impl Connman
impl Connman
pub fn add_whitelist_permission_flags( &self, flags: &mut NetPermissionFlags, addr: &NetAddr )
source§impl Connman
impl Connman
sourcepub fn attempt_to_evict_connection(&self) -> bool
pub fn attempt_to_evict_connection(&self) -> bool
| Try to find a connection to evict when | the node is full. | | Extreme care must be taken to avoid opening | the node to attacker triggered network | partitioning. | | The strategy used here is to protect | a small number of peers for each of several | distinct characteristics which are | difficult to forge. | | In order to partition a node the attacker | must be simultaneously better at all | of them than honest peers. |
source§impl Connman
impl Connman
pub fn bind( &self, addr: &Service, flags: u32, permissions: NetPermissionFlags ) -> bool
pub fn init_binds(&self, options: &ConnmanOptions) -> bool
source§impl Connman
impl Connman
pub fn bind_listen_port( &self, addr_bind: &Service, str_error: &mut BilingualStr, permissions: NetPermissionFlags ) -> bool
source§impl Connman
impl Connman
sourcepub fn already_connected_to_address(&self, addr: &Address) -> bool
pub fn already_connected_to_address(&self, addr: &Address) -> bool
| Determine whether we’re already connected | to a given address, in order to avoid | initiating duplicate connections. |
pub fn connect_node( &self, addr_connect: Address, psz_dest: *const u8, count_failure: bool, conn_type: ConnectionType ) -> Amo<Box<dyn NodeInterface>>
source§impl Connman
impl Connman
sourcepub fn create_node_from_accepted_socket(
&self,
h_socket: &mut CSocket,
permission_flags: NetPermissionFlags,
addr_bind: &Address,
addr: &Address
)
pub fn create_node_from_accepted_socket( &self, h_socket: &mut CSocket, permission_flags: NetPermissionFlags, addr_bind: &Address, addr: &Address )
| Create a Node object from a socket that
| has just been accepted, and add the new
| node to the vNodes member.
|
| ———–
| @param[in] hSocket
|
| Connected socket to communicate with
| the peer.
| –––––
| @param[in] permissionFlags
|
| The peer’s permissions.
| –––––
| @param[in] addr_bind
|
| The address and port at our side of the
| connection.
| –––––
| @param[in] addr
|
| The address and port at the peer’s side
| of the connection.
|
source§impl Connman
impl Connman
pub fn disconnect_node_with_str(&mut self, str_node: &String) -> bool
pub fn disconnect_node_with_subnet(&mut self, subnet: &SubNet) -> bool
pub fn disconnect_node_with_netaddr(&mut self, addr: &NetAddr) -> bool
pub fn disconnect_node_with_id(&mut self, id: NodeId) -> bool
pub fn disconnect_nodes(&self)
source§impl Connman
impl Connman
pub fn find_node_with_ip(&self, ip: &NetAddr) -> Amo<Box<dyn NodeInterface>>
pub fn find_node_with_subnet( &self, subnet: &SubNet ) -> Amo<Box<dyn NodeInterface>>
pub fn find_node_with_addr_name( &self, addr_name: &str ) -> Amo<Box<dyn NodeInterface>>
pub fn find_node_with_addr(&self, addr: &Service) -> Amo<Box<dyn NodeInterface>>
source§impl Connman
impl Connman
pub fn generate_select_set( &self, recv_set: &mut HashSet<CSocket>, send_set: &mut HashSet<CSocket>, error_set: &mut HashSet<CSocket> ) -> bool
source§impl Connman
impl Connman
pub fn get_new_node_id(&self) -> NodeId
pub fn get_node_count(&self, flags: ConnectionDirection) -> usize
pub fn get_node_stats(&self, vstats: &mut Vec<NodeStats>)
pub fn get_max_outbound_target(&self) -> u64
pub fn get_max_outbound_timeframe(&self) -> Duration
sourcepub fn get_max_outbound_time_left_in_cycle(&self) -> Duration
pub fn get_max_outbound_time_left_in_cycle(&self) -> Duration
| returns the time left in the current | max outbound cycle; in case of no limit, | it will always return 0 |
sourcepub fn get_current_block_relay_only_conns(&self) -> Vec<Address>
pub fn get_current_block_relay_only_conns(&self) -> Vec<Address>
| Return vector of current BLOCK_RELAY | peers. |
sourcepub fn get_extra_block_relay_count(&self) -> i32
pub fn get_extra_block_relay_count(&self) -> i32
| Count the number of block-relay-only | peers we have over our limit. |
sourcepub fn get_extra_full_outbound_count(&self) -> i32
pub fn get_extra_full_outbound_count(&self) -> i32
| Return the number of peers we have over our | outbound connection limit | | Exclude peers that are marked for disconnect, | or are going to be disconnected soon (eg | ADDR_FETCH and FEELER) | | Also exclude peers that haven’t finished | initial connection handshake yet (so that we | don’t decide we’re over our desired connection | limit, and then evict some peer that has | finished the handshake) | | Return the number of outbound peers we have | in excess of our target (eg, if we | previously called | SetTryNewOutboundPeer(true), and have since | set to false, we may have extra peers that | we wish to disconnect). This may return | a value less than (num_outbound_connections | - num_outbound_slots) in cases where some | outbound connections are not yet fully | connected, or not yet fully disconnected.
pub fn get_try_new_outbound_peer(&self) -> bool
pub fn get_network_active(&self) -> bool
pub fn get_use_addrman_outgoing(&self) -> bool
sourcepub fn get_outbound_target_bytes_left(&self) -> u64
pub fn get_outbound_target_bytes_left(&self) -> u64
| returns the bytes left in the current | max outbound cycle; in case of no limit, | it will always return 0 |
pub fn get_total_bytes_recv(&self) -> u64
pub fn get_total_bytes_sent(&self) -> u64
sourcepub fn get_local_services(&self) -> ServiceFlags
pub fn get_local_services(&self) -> ServiceFlags
| Used to convey which local services we are
| offering peers during node connection.
|
| The data returned by this is used in Node
| construction, which is used to advertise
| which services we are offering that peer
| during net_processing.cpp:PushNodeVersion().
pub fn get_receive_flood_size(&self) -> u32
sourcepub fn get_deterministic_randomizer(&self, id: u64) -> SipHasher
pub fn get_deterministic_randomizer(&self, id: u64) -> SipHasher
| Get a unique deterministic randomizer. |
source§impl Connman
impl Connman
pub fn get_added_node_info(&self) -> Vec<AddedNodeInfo>
source§impl Connman
impl Connman
sourcepub fn get_addresses(
&self,
max_addresses: usize,
max_pct: usize,
network: Option<Network>
) -> Vec<Address>
pub fn get_addresses( &self, max_addresses: usize, max_pct: usize, network: Option<Network> ) -> Vec<Address>
| Return all or many randomly selected | addresses, optionally by network. | | ———– | @param[in] max_addresses | | Maximum number of addresses to return | (0 = all). | ––––– | @param[in] max_pct | | Maximum percentage of addresses to | return (0 = all). | ––––– | @param[in] network | | Select only addresses of this network | (nullopt = all). |
sourcepub fn get_addresses_with_requestor(
&mut self,
requestor: &mut AmoWriteGuard<'_, Box<dyn NodeInterface>>,
max_addresses: usize,
max_pct: usize
) -> Vec<Address>
pub fn get_addresses_with_requestor( &mut self, requestor: &mut AmoWriteGuard<'_, Box<dyn NodeInterface>>, max_addresses: usize, max_pct: usize ) -> Vec<Address>
| Cache is used to minimize topology leaks, | so it should be used for all non-trusted | calls, for example, p2p. | | A non-malicious call (from RPC or a peer | with addr permission) should call the | function without a parameter to avoid | using the cache. |
source§impl Connman
impl Connman
sourcepub fn should_run_inactivity_checks(
&self,
node: &AmoWriteGuard<'_, Box<dyn NodeInterface>>,
now: OffsetDateTime
) -> bool
pub fn should_run_inactivity_checks( &self, node: &AmoWriteGuard<'_, Box<dyn NodeInterface>>, now: OffsetDateTime ) -> bool
| Return true if we should disconnect | the peer for failing an inactivity check. |
sourcepub fn inactivity_check(
&self,
node: &AmoWriteGuard<'_, Box<dyn NodeInterface>>
) -> bool
pub fn inactivity_check( &self, node: &AmoWriteGuard<'_, Box<dyn NodeInterface>> ) -> bool
| Return true if the peer is inactive and | should be disconnected. |
source§impl Connman
impl Connman
pub fn for_node<'a>( &'a mut self, id: NodeId, func: &'a ConnmanForNodeFn ) -> bool
pub fn for_each_node<'a>(&'a mut self, func: &'a ConnmanNodeFn)
pub fn for_node_mut<'a>( &'a mut self, id: NodeId, func: &'a mut ConnmanForNodeFnMut ) -> bool
pub fn for_each_node_mut<'a>(&'a mut self, func: &'a mut ConnmanNodeFnMut)
source§impl Connman
impl Connman
pub fn notify_num_connections_changed(&self)
source§impl Connman
impl Connman
sourcepub fn open_network_connection(
&self,
addr_connect: &Address,
count_failure: bool,
grant_outbound: Option<&mut SemaphoreGrant>,
psz_dest: *const u8,
conn_type: ConnectionType
)
pub fn open_network_connection( &self, addr_connect: &Address, count_failure: bool, grant_outbound: Option<&mut SemaphoreGrant>, psz_dest: *const u8, conn_type: ConnectionType )
| if successful, this moves the passed | grant to the constructed node |
source§impl Connman
impl Connman
pub fn n_receive_flood_size(&self) -> u32
sourcepub fn outbound_target_reached(
&self,
historical_block_serving_limit: bool
) -> bool
pub fn outbound_target_reached( &self, historical_block_serving_limit: bool ) -> bool
| check if the outbound target is reached | | if param historicalBlockServingLimit is | set true, the function will return true | if the limit for serving historical blocks | has been reached
source§impl Connman
impl Connman
sourcepub fn poisson_next_send_inbound(
&mut self,
now: OffsetDateTime,
average_interval: Duration
) -> OffsetDateTime
pub fn poisson_next_send_inbound( &mut self, now: OffsetDateTime, average_interval: Duration ) -> OffsetDateTime
| Attempts to obfuscate tx time through | exponentially distributed emitting. | | Works assuming that a single interval | is used. | | Variable intervals will result in privacy | decrease. |
source§impl Connman
impl Connman
pub fn process_addr_fetch(&self)
source§impl Connman
impl Connman
pub fn push_message( &mut self, node: &mut Box<dyn NodeInterface>, msg: SerializedNetMsg )
source§impl Connman
impl Connman
pub fn record_bytes_recv(&self, bytes: u64)
pub fn record_bytes_sent(&self, bytes: u64)
source§impl Connman
impl Connman
sourcepub fn set_try_new_outbound_peer(&mut self, flag: bool)
pub fn set_try_new_outbound_peer(&mut self, flag: bool)
| This allows temporarily exceeding | m_max_outbound_full_relay, with the goal of | finding a peer that is better than all our | current peers.