Bitcoin Core 28.0.0
P2P Digital Currency
Loading...
Searching...
No Matches
net_processing.cpp
Go to the documentation of this file.
1// Copyright (c) 2009-2010 Satoshi Nakamoto
2// Copyright (c) 2009-2022 The Bitcoin Core developers
3// Distributed under the MIT software license, see the accompanying
4// file COPYING or http://www.opensource.org/licenses/mit-license.php.
5
6#include <net_processing.h>
7
8#include <addrman.h>
9#include <banman.h>
10#include <blockencodings.h>
11#include <blockfilter.h>
12#include <chainparams.h>
13#include <consensus/amount.h>
15#include <deploymentstatus.h>
16#include <hash.h>
17#include <headerssync.h>
19#include <kernel/chain.h>
21#include <logging.h>
22#include <merkleblock.h>
23#include <netbase.h>
24#include <netmessagemaker.h>
25#include <node/blockstorage.h>
26#include <node/timeoffsets.h>
28#include <node/warnings.h>
29#include <policy/fees.h>
30#include <policy/policy.h>
31#include <policy/settings.h>
32#include <primitives/block.h>
34#include <random.h>
35#include <scheduler.h>
36#include <streams.h>
37#include <sync.h>
38#include <tinyformat.h>
39#include <txmempool.h>
40#include <txorphanage.h>
41#include <txrequest.h>
42#include <util/check.h>
43#include <util/strencodings.h>
44#include <util/time.h>
45#include <util/trace.h>
46#include <validation.h>
47
48#include <algorithm>
49#include <atomic>
50#include <future>
51#include <memory>
52#include <optional>
53#include <ranges>
54#include <typeinfo>
55#include <utility>
56
/** Headers download timeout: base plus a per-expected-header increment. */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
/** How long to wait for a peer to respond to a getheaders request. */
static constexpr auto HEADERS_RESPONSE_TIME{2min};
/** Timeout for (unprotected) outbound peers to catch up to our chain work (see ChainSyncTimeoutState). */
static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
/** How frequently to run the stale-tip check (see CheckForStaleTipAndEvictPeers). */
static constexpr auto STALE_CHECK_INTERVAL{10min};
/** How frequently to check whether we have extra outbound peers to evict. */
static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
/** Minimum time a connection must be up before it is considered for eviction (see MINIMUM_CONNECT_TIME usage in eviction logic). */
static constexpr auto MINIMUM_CONNECT_TIME{30s};
/** Salt used to deterministically select peers for address relay. */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/** 30 days, in seconds — age limit used by stale-block relay logic. */
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/** 7 days, in seconds — age beyond which a block is treated as "historical". */
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** How often to send a ping to each peer. */
static constexpr auto PING_INTERVAL{2min};
/** Maximum number of entries accepted in a block locator. */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** Maximum number of entries accepted in an "inv" message. */
static const unsigned int MAX_INV_SZ = 50000;
/** Maximum number of in-flight transaction requests from any single peer. */
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;
/** Maximum number of queued transaction announcements tracked per peer. */
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000;
/** Delay applied to txid-based announcements (wtxid announcements are preferred). */
static constexpr auto TXID_RELAY_DELAY{2s};
/** Delay applied to announcements from non-preferred peers. */
static constexpr auto NONPREF_PEER_TX_DELAY{2s};
/** Delay applied when a peer has too many in-flight requests. */
static constexpr auto OVERLOADED_PEER_TX_DELAY{2s};
/** How long to wait for a getdata response before giving up on a tx request. */
static constexpr auto GETDATA_TX_INTERVAL{60s};
/** Maximum number of items accepted in a "getdata" message. */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Maximum number of blocks requested in flight from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Initial block-download stalling timeout; doubled on each stall up to the max below. */
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
/** Upper bound for the (adaptive) block stalling timeout. */
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
/** Maximum number of headers sent in one "headers" message. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Maximum depth from the tip for which a compact block is served. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/** Maximum depth from the tip for which "blocktxn" requests are answered. */
static const int MAX_BLOCKTXN_DEPTH = 10;
static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP, "MAX_BLOCKTXN_DEPTH too high");
/** Size of the moving window of blocks being downloaded ahead of the validated chain. */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Block download timeout base and per-additional-peer scaling factor (multiples of the target block interval — TODO confirm units against usage). */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/** Maximum number of blocks announced via headers instead of inv. */
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Minimum number of recent blocks a NODE_NETWORK_LIMITED peer must serve (288 = ~2 days). */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Extra safety margin (in blocks) when connecting to NODE_NETWORK_LIMITED peers. */
static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144;
/** Average interval between broadcasts of our own local address. */
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
/** Average interval between address relay broadcasts to a peer. */
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
/** How often the set of peers chosen as address-relay destinations rotates. */
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
/** Average interval between inventory broadcasts to inbound peers (longer than for outbound, to hinder timing-based deanonymization). */
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
/** Target rate of transaction inventory items announced per second. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** Hard cap on inventory items in a single broadcast. */
static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000;
static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low");
static_assert(INVENTORY_BROADCAST_MAX <= MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high");
/** Average interval between feefilter broadcasts. */
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
/** Maximum delay before an updated feefilter is sent out. */
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
/** Maximum number of compact filters that may be requested with one getcfilters. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cfheaders that may be requested with one getcfheaders. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** Percentage of known addresses to send in a single addr reply. */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
/** Maximum number of addresses accumulated per peer before replacement kicks in (see PushAddress). */
static constexpr size_t MAX_ADDR_TO_SEND{1000};
/** Token-bucket refill rate (addresses/second) for rate-limiting addr processing. */
static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
/** Protocol version number we announce in "sendcmpct" (BIP152 compact blocks v2 / witness). */
static constexpr uint64_t CMPCTBLOCKS_VERSION{2};
181
182// Internal stuff
183namespace {
/** Entry describing one block download that is currently in flight. */
struct QueuedBlock {
    /** Index entry of the block being downloaded. */
    const CBlockIndex* pindex;
    /** Partial-block reconstruction state; only allocated when the caller of
     *  BlockRequested() asks for it (compact-block path, presumably — confirm
     *  against BlockRequested usage), otherwise nullptr. */
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};
191
/**
 * Per-peer state managed by PeerManagerImpl. Members are either atomic,
 * guarded by a member mutex, or guarded by the global message-processing
 * mutex, as indicated by the thread-safety annotations.
 */
struct Peer {
    /** Peer id; matches the corresponding CNode's id. */
    const NodeId m_id{0};

    /** Services we offered to this peer when the connection was set up. */
    const ServiceFlags m_our_services;
    /** Services this peer advertised to us; NODE_NONE until its version message arrives. */
    std::atomic<ServiceFlags> m_their_services{NODE_NONE};

    /** Protects misbehavior state below. */
    Mutex m_misbehavior_mutex;
    /** Whether this peer should be discouraged/disconnected due to misbehavior. */
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    /** Protects the block-relay queues below. */
    Mutex m_block_inv_mutex;
    /** Block hashes queued to be announced to this peer via inv. */
    std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
    /** Block hashes queued to be announced to this peer via headers. */
    std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
    /** Hash used to continue a paged getblocks reply (null when no continuation pending — confirm against getblocks handler). */
    uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {};

    /** Whether we have sent our version message on this (outbound) connection. */
    bool m_outbound_version_message_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    /** Start height the peer reported in its version message; -1 if unknown. */
    std::atomic<int> m_starting_height{-1};

    /** Nonce of the last ping we sent (0 = none outstanding). */
    std::atomic<uint64_t> m_ping_nonce_sent{0};
    /** When the outstanding ping was sent. */
    std::atomic<std::chrono::microseconds> m_ping_start{0us};
    /** Whether a ping has been explicitly requested (e.g. via RPC) and is pending. */
    std::atomic<bool> m_ping_queued{false};

    /** Whether this peer relays transactions by wtxid (BIP339). */
    std::atomic<bool> m_wtxid_relay{false};
    /** Earliest time at which the next feefilter message may be sent. */
    std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};

    /** Transaction-relay state; only allocated (via SetTxRelay) for peers we relay transactions with. */
    struct TxRelay {
        mutable RecursiveMutex m_bloom_filter_mutex;
        /** Whether the peer wants transactions relayed to it. */
        bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
        /** BIP37 bloom filter installed by the peer, if any. */
        std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};

        mutable RecursiveMutex m_tx_inventory_mutex;
        /** Rolling filter of tx hashes the peer already knows, so we avoid re-announcing. */
        CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
        /** Tx hashes queued to be announced to this peer. */
        std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
        /** Whether the peer asked for our whole mempool (BIP35). */
        bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
        /** Next scheduled time to flush m_tx_inventory_to_send. */
        std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
        /** Mempool sequence number as of the last inv sent (starts at 1). */
        uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1};

        /** Minimum fee rate (per kvB — confirm units) the peer accepts, from its feefilter message. */
        std::atomic<CAmount> m_fee_filter_received{0};
    };

    /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */
    TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
    {
        LOCK(m_tx_relay_mutex);
        Assume(!m_tx_relay);
        m_tx_relay = std::make_unique<Peer::TxRelay>();
        return m_tx_relay.get();
    };

    /** Returns the TxRelay state, or nullptr if tx relay was never set up for this peer. */
    TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
    {
        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
    };

    /** Addresses queued to be sent to this peer (bounded by MAX_ADDR_TO_SEND, see PushAddress). */
    std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    /** Rolling filter of addresses this peer already knows; allocated when address relay is enabled. */
    std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
    /** Whether address relay is enabled with this peer. */
    std::atomic_bool m_addr_relay_enabled{false};
    /** Whether we have sent a getaddr to this peer. */
    bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    /** Protects the addr-send schedule below. */
    mutable Mutex m_addr_send_times_mutex;
    /** Next scheduled time to send queued addresses. */
    std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Next scheduled time to advertise our own local address. */
    std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    /** Whether the peer signalled support for addrv2 (BIP155). */
    std::atomic_bool m_wants_addrv2{false};
    /** Whether we have received (and answered) a getaddr from this peer. */
    bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
    /** Token bucket for rate-limiting processing of addresses received from this peer. */
    double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0};
    /** Last time the token bucket was refilled. */
    std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()};
    /** Count of addresses dropped due to rate limiting. */
    std::atomic<uint64_t> m_addr_rate_limited{0};
    /** Count of addresses accepted for processing. */
    std::atomic<uint64_t> m_addr_processed{0};

    /** Whether an inv of a block has already triggered a getheaders to this peer before sync. */
    bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    /** Protects the getdata work queue. */
    Mutex m_getdata_requests_mutex;
    /** Work queue of getdata items received from this peer, drained by ProcessGetData. */
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);


    /** Protects the low-work headers-sync state machine below. */
    Mutex m_headers_sync_mutex;
    /** State for an in-progress low-work headers sync with this peer, if any. */
    std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {};

    /** Whether we have sent sendheaders (BIP130) to this peer. */
    std::atomic<bool> m_sent_sendheaders{false};

    /** Deadline for the current headers download to make progress. */
    std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};

    /** Whether the peer asked for block announcements via headers (sendheaders). */
    bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

    /** Clock offset between this peer and us, from its version message timestamp. */
    std::atomic<std::chrono::seconds> m_time_offset{0s};

    explicit Peer(NodeId id, ServiceFlags our_services)
        : m_id{id}
        , m_our_services{our_services}
    {}

private:
    mutable Mutex m_tx_relay_mutex;

    /** Transaction relay state, if any; access via SetTxRelay()/GetTxRelay(). */
    std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
};
406
using PeerRef = std::shared_ptr<Peer>;

/**
 * Validation-related state for a peer. Stored in m_node_states, which is
 * GUARDED_BY(cs_main), so all access requires cs_main.
 */
struct CNodeState {
    /** Best block this peer is known to have (nullptr if unknown). */
    const CBlockIndex* pindexBestKnownBlock{nullptr};
    /** Hash of the last block the peer announced that we don't have an index entry for. */
    uint256 hashLastUnknownBlock{};
    /** Last block in our chain that we know this peer also has. */
    const CBlockIndex* pindexLastCommonBlock{nullptr};
    /** Best header we have announced to this peer. */
    const CBlockIndex* pindexBestHeaderSent{nullptr};
    /** Whether initial headers sync has been started with this peer. */
    bool fSyncStarted{false};
    /** When this peer started stalling block download (0us = not stalling). */
    std::chrono::microseconds m_stalling_since{0us};
    /** Blocks currently in flight from this peer, in request order. */
    std::list<QueuedBlock> vBlocksInFlight;
    /** When the frontmost entry of vBlocksInFlight started downloading. */
    std::chrono::microseconds m_downloading_since{0us};
    /** Whether this peer is preferred for block download. */
    bool fPreferredDownload{false};
    /** Whether we requested high-bandwidth compact block announcements from this peer. */
    bool m_requested_hb_cmpctblocks{false};
    /** Whether this peer can supply compact blocks. */
    bool m_provides_cmpctblocks{false};

    /** State for the chain-sync timeout check (see CHAIN_SYNC_TIMEOUT). */
    struct ChainSyncTimeoutState {
        /** Deadline for the peer to demonstrate sufficient chain work (0s = none scheduled). */
        std::chrono::seconds m_timeout{0s};
        /** Header the peer's chain must have at least as much work as. */
        const CBlockIndex* m_work_header{nullptr};
        /** Whether a getheaders was already sent as part of this check. */
        bool m_sent_getheaders{false};
        /** Whether this peer is protected from chain-sync-based disconnection. */
        bool m_protect{false};
    };

    ChainSyncTimeoutState m_chain_sync;

    /** Time (unix seconds, presumably — confirm against UpdateLastBlockAnnounceTime) of this peer's last new-block announcement. */
    int64_t m_last_block_announcement{0};

    /** Whether this peer connected to us (inbound) rather than us to it. */
    const bool m_is_inbound;

    CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};
483
484class PeerManagerImpl final : public PeerManager
485{
486public:
487 PeerManagerImpl(CConnman& connman, AddrMan& addrman,
488 BanMan* banman, ChainstateManager& chainman,
489 CTxMemPool& pool, node::Warnings& warnings, Options opts);
490
492 void ActiveTipChange(const CBlockIndex& new_tip, bool) override
493 EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
494 void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
495 EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
496 void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override
497 EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex);
498 void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
499 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
500 void BlockChecked(const CBlock& block, const BlockValidationState& state) override
501 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
502 void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override
503 EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
504
506 void InitializeNode(const CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_tx_download_mutex);
507 void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, !m_tx_download_mutex);
508 bool HasAllDesirableServiceFlags(ServiceFlags services) const override;
509 bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
510 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);
511 bool SendMessages(CNode* pto) override
512 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, g_msgproc_mutex, !m_tx_download_mutex);
513
515 void StartScheduledTasks(CScheduler& scheduler) override;
516 void CheckForStaleTipAndEvictPeers() override;
517 std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override
518 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
519 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
520 PeerManagerInfo GetInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
521 void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
522 void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
523 void SetBestBlock(int height, std::chrono::seconds time) override
524 {
525 m_best_height = height;
526 m_best_block_time = time;
527 };
528 void UnitTestMisbehaving(NodeId peer_id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), ""); };
529 void ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
530 const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
531 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex);
532 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override;
533 ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const override;
534
535private:
537 void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);
538
540 void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
541
543 void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
544
547 PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
548
551 PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
552
555 void Misbehaving(Peer& peer, const std::string& message);
556
565 void MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
566 bool via_compact_block, const std::string& message = "")
567 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
568
572 void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
573 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
574
581 bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);
582
588 void ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result,
589 bool maybe_add_extra_compact_tx)
590 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
591
594 void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
595 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
596
597 struct PackageToValidate {
598 const Package m_txns;
599 const std::vector<NodeId> m_senders;
601 explicit PackageToValidate(const CTransactionRef& parent,
602 const CTransactionRef& child,
603 NodeId parent_sender,
604 NodeId child_sender) :
605 m_txns{parent, child},
606 m_senders {parent_sender, child_sender}
607 {}
608
609 std::string ToString() const {
610 Assume(m_txns.size() == 2);
611 return strprintf("parent %s (wtxid=%s, sender=%d) + child %s (wtxid=%s, sender=%d)",
612 m_txns.front()->GetHash().ToString(),
613 m_txns.front()->GetWitnessHash().ToString(),
614 m_senders.front(),
615 m_txns.back()->GetHash().ToString(),
616 m_txns.back()->GetWitnessHash().ToString(),
617 m_senders.back());
618 }
619 };
620
624 void ProcessPackageResult(const PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result)
625 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
626
630 std::optional<PackageToValidate> Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid)
631 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
632
644 bool ProcessOrphanTx(Peer& peer)
645 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, !m_tx_download_mutex);
646
654 void ProcessHeadersMessage(CNode& pfrom, Peer& peer,
655 std::vector<CBlockHeader>&& headers,
656 bool via_compact_block)
657 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
660 bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer);
662 arith_uint256 GetAntiDoSWorkThreshold();
666 void HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
668 bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
687 bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom,
688 std::vector<CBlockHeader>& headers)
689 EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
701 bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
702 const CBlockIndex* chain_start_header,
703 std::vector<CBlockHeader>& headers)
704 EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
705
708 bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
709
714 bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
716 void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
718 void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
719 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
720
721 void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
722
726 void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
727 EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_tx_download_mutex);
728
730 void PushMessage(CNode& node, CSerializedNetMsg&& msg) const { m_connman.PushMessage(&node, std::move(msg)); }
731 template <typename... Args>
732 void MakeAndPushMessage(CNode& node, std::string msg_type, Args&&... args) const
733 {
734 m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...));
735 }
736
738 void PushNodeVersion(CNode& pnode, const Peer& peer);
739
744 void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);
745
747 void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
748
750 void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
751
759 void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
760
762 void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
763
765
767
768 const CChainParams& m_chainparams;
769 CConnman& m_connman;
770 AddrMan& m_addrman;
772 BanMan* const m_banman;
773 ChainstateManager& m_chainman;
774 CTxMemPool& m_mempool;
775
784 Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs);
785 TxRequestTracker m_txrequest GUARDED_BY(m_tx_download_mutex);
786 std::unique_ptr<TxReconciliationTracker> m_txreconciliation;
787
789 std::atomic<int> m_best_height{-1};
791 std::atomic<std::chrono::seconds> m_best_block_time{0s};
792
794 std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};
795
796 node::Warnings& m_warnings;
797 TimeOffsets m_outbound_time_offsets{m_warnings};
798
799 const Options m_opts;
800
801 bool RejectIncomingTxs(const CNode& peer) const;
802
805 bool m_initial_sync_finished GUARDED_BY(cs_main){false};
806
809 mutable Mutex m_peer_mutex;
816 std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
817
819 std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);
820
822 const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
824 CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
825
826 uint32_t GetFetchFlags(const Peer& peer) const;
827
828 std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
829
831 int nSyncStarted GUARDED_BY(cs_main) = 0;
832
834 uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};
835
842 std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
843
845 std::atomic<int> m_wtxid_relay_peers{0};
846
848 int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
849
851 int m_num_preferred_download_peers GUARDED_BY(cs_main){0};
852
854 std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};
855
863 bool AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable)
864 EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex);
865
900 std::unique_ptr<CRollingBloomFilter> m_lazy_recent_rejects GUARDED_BY(m_tx_download_mutex){nullptr};
901
902 CRollingBloomFilter& RecentRejectsFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex)
903 {
904 AssertLockHeld(m_tx_download_mutex);
905
906 if (!m_lazy_recent_rejects) {
907 m_lazy_recent_rejects = std::make_unique<CRollingBloomFilter>(120'000, 0.000'001);
908 }
909
910 return *m_lazy_recent_rejects;
911 }
912
933 std::unique_ptr<CRollingBloomFilter> m_lazy_recent_rejects_reconsiderable GUARDED_BY(m_tx_download_mutex){nullptr};
934
935 CRollingBloomFilter& RecentRejectsReconsiderableFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex)
936 {
937 AssertLockHeld(m_tx_download_mutex);
938
939 if (!m_lazy_recent_rejects_reconsiderable) {
940 m_lazy_recent_rejects_reconsiderable = std::make_unique<CRollingBloomFilter>(120'000, 0.000'001);
941 }
942
943 return *m_lazy_recent_rejects_reconsiderable;
944 }
945
946 /*
947 * Filter for transactions that have been recently confirmed.
948 * We use this to avoid requesting transactions that have already been
949 * confirnmed.
950 *
951 * Blocks don't typically have more than 4000 transactions, so this should
952 * be at least six blocks (~1 hr) worth of transactions that we can store,
953 * inserting both a txid and wtxid for every observed transaction.
954 * If the number of transactions appearing in a block goes up, or if we are
955 * seeing getdata requests more than an hour after initial announcement, we
956 * can increase this number.
957 * The false positive rate of 1/1M should come out to less than 1
958 * transaction per day that would be inadvertently ignored (which is the
959 * same probability that we have in the reject filter).
960 */
961 std::unique_ptr<CRollingBloomFilter> m_lazy_recent_confirmed_transactions GUARDED_BY(m_tx_download_mutex){nullptr};
962
963 CRollingBloomFilter& RecentConfirmedTransactionsFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex)
964 {
965 AssertLockHeld(m_tx_download_mutex);
966
967 if (!m_lazy_recent_confirmed_transactions) {
968 m_lazy_recent_confirmed_transactions = std::make_unique<CRollingBloomFilter>(48'000, 0.000'001);
969 }
970
971 return *m_lazy_recent_confirmed_transactions;
972 }
973
980 std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
981 std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
982
983
984 // All of the following cache a recent block, and are protected by m_most_recent_block_mutex
985 Mutex m_most_recent_block_mutex;
986 std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
987 std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
988 uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
989 std::unique_ptr<const std::map<uint256, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);
990
991 // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates.
993 Mutex m_headers_presync_mutex;
1001 using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
1003 std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {};
1005 NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1};
1007 std::atomic_bool m_headers_presync_should_signal{false};
1008
1010 int m_highest_fast_announce GUARDED_BY(::cs_main){0};
1011
1013 bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1014
1016 bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1017
1025 void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1026
1027 /* Mark a block as in flight
1028 * Returns false, still setting pit, if the block was already in flight from the same peer
1029 * pit will only be valid as long as the same cs_main lock is being held
1030 */
1031 bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1032
1033 bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1034
1038 void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1039
1041 void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1042
1070 void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1071
1072 /* Multimap used to preserve insertion order */
1073 typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap;
1074 BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);
1075
1077 std::atomic<std::chrono::seconds> m_last_tip_update{0s};
1078
1080 CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
1082
1083 void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
1084 EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex)
1086
1088 void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);
1089
1091 void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
1092 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);
1093
1100 void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1101
1103 std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
1104
1106 int m_peers_downloading_from GUARDED_BY(cs_main) = 0;
1107
1109 TxOrphanage m_orphanage GUARDED_BY(m_tx_download_mutex);
1110
1111 void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1112
1116 std::vector<CTransactionRef> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
1118 size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;
1119
1121 void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1123 void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1124 bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1125
1130 int64_t ApproximateBestBlockDepth() const;
1131
1138 bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1139 bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1140 void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
1141 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);
1142
1158 bool PrepareBlockFilterRequest(CNode& node, Peer& peer,
1159 BlockFilterType filter_type, uint32_t start_height,
1160 const uint256& stop_hash, uint32_t max_height_diff,
1161 const CBlockIndex*& stop_index,
1162 BlockFilterIndex*& filter_index);
1163
1173 void ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv);
1174
1184 void ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv);
1185
1195 void ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv);
1196
1203 bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1204
1205 void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1206 void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1207};
1208
1209const CNodeState* PeerManagerImpl::State(NodeId pnode) const
1210{
1211 std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1212 if (it == m_node_states.end())
1213 return nullptr;
1214 return &it->second;
1215}
1216
1217CNodeState* PeerManagerImpl::State(NodeId pnode)
1218{
1219 return const_cast<CNodeState*>(std::as_const(*this).State(pnode));
1220}
1221
1227static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
1228{
1229 return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
1230}
1231
1232void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr)
1233{
1234 assert(peer.m_addr_known);
1235 peer.m_addr_known->insert(addr.GetKey());
1236}
1237
1238void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr)
1239{
1240 // Known checking here is only to save space from duplicates.
1241 // Before sending, we'll filter it again for known addresses that were
1242 // added after addresses were pushed.
1243 assert(peer.m_addr_known);
1244 if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
1245 if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
1246 peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr;
1247 } else {
1248 peer.m_addrs_to_send.push_back(addr);
1249 }
1250 }
1251}
1252
1253static void AddKnownTx(Peer& peer, const uint256& hash)
1254{
1255 auto tx_relay = peer.GetTxRelay();
1256 if (!tx_relay) return;
1257
1258 LOCK(tx_relay->m_tx_inventory_mutex);
1259 tx_relay->m_tx_inventory_known_filter.insert(hash);
1260}
1261
1263static bool CanServeBlocks(const Peer& peer)
1264{
1265 return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED);
1266}
1267
1270static bool IsLimitedPeer(const Peer& peer)
1271{
1272 return (!(peer.m_their_services & NODE_NETWORK) &&
1273 (peer.m_their_services & NODE_NETWORK_LIMITED));
1274}
1275
1277static bool CanServeWitnesses(const Peer& peer)
1278{
1279 return peer.m_their_services & NODE_WITNESS;
1280}
1281
1282std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
1283 std::chrono::seconds average_interval)
1284{
1285 if (m_next_inv_to_inbounds.load() < now) {
1286 // If this function were called from multiple threads simultaneously
1287 // it would possible that both update the next send variable, and return a different result to their caller.
1288 // This is not possible in practice as only the net processing thread invokes this function.
1289 m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval);
1290 }
1291 return m_next_inv_to_inbounds;
1292}
1293
1294bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
1295{
1296 return mapBlocksInFlight.count(hash);
1297}
1298
1299bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
1300{
1301 for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
1302 auto [nodeid, block_it] = range.first->second;
1303 CNodeState& nodestate = *Assert(State(nodeid));
1304 if (!nodestate.m_is_inbound) return true;
1305 }
1306
1307 return false;
1308}
1309
/**
 * Remove in-flight tracking for block `hash`. If `from_peer` is given, only
 * the entry for that peer is removed; otherwise all entries for the hash are.
 * Also updates per-peer download bookkeeping (download start time, stalling
 * state, and the count of peers we're downloading from).
 */
void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer)
{
    auto range = mapBlocksInFlight.equal_range(hash);
    if (range.first == range.second) {
        // Block was not requested from any peer
        return;
    }

    // We should not have requested too many of this block
    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    // Walk all in-flight entries for this hash, erasing matching ones.
    // erase() returns the next iterator, so the loop advances either by
    // erasing or by explicitly incrementing (when skipping a non-matching peer).
    while (range.first != range.second) {
        auto [node_id, list_it] = range.first->second;

        if (from_peer && *from_peer != node_id) {
            range.first++;
            continue;
        }

        CNodeState& state = *Assert(State(node_id));

        if (state.vBlocksInFlight.begin() == list_it) {
            // First block on the queue was received, update the start download time for the next one
            state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>());
        }
        state.vBlocksInFlight.erase(list_it);

        if (state.vBlocksInFlight.empty()) {
            // Last validated block on the queue for this peer was received.
            m_peers_downloading_from--;
        }
        // Receiving any requested block clears this peer's stalling marker.
        state.m_stalling_since = 0us;

        range.first = mapBlocksInFlight.erase(range.first);
    }
}
1346
/** Record that we are requesting this block from the given peer.
 *  Returns false (without re-adding) if the block is already marked in-flight
 *  for this same peer; returns true once the new entry has been added.
 *  If pit is non-null, *pit receives a pointer to the list iterator for the
 *  newly created (or existing) queue entry, and a PartiallyDownloadedBlock is
 *  allocated for compact-block reconstruction. */
bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit)
{
    const uint256& hash{block.GetBlockHash()};

    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);

    // Short-circuit most stuff in case it is from the same node
    for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
        if (range.first->second.first == nodeid) {
            if (pit) {
                *pit = &range.first->second.second;
            }
            return false;
        }
    }

    // Make sure it's not being fetched already from same peer.
    RemoveBlockRequest(hash, nodeid);

    // Only allocate a PartiallyDownloadedBlock when the caller wants the
    // iterator back (compact block path); otherwise store a null pointer.
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
            {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
    if (state->vBlocksInFlight.size() == 1) {
        // We're starting a block download (batch) from this peer.
        state->m_downloading_since = GetTime<std::chrono::microseconds>();
        m_peers_downloading_from++;
    }
    auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it)));
    if (pit) {
        *pit = &itInFlight->second.second;
    }
    return true;
}
1382
/** Consider promoting this peer to a BIP152 high-bandwidth compact-block
 *  announcer. We keep at most 3 such peers (lNodesAnnouncingHeaderAndIDs,
 *  oldest at the front); adding a new one demotes the oldest back to
 *  low-bandwidth, with care taken not to evict our last outbound HB peer
 *  in favour of an inbound one.
 *  NOTE(review): one line of the original body (line 1385, before the
 *  -blocksonly check) is not visible in this extraction. */
void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
{

    // When in -blocksonly mode, never request high-bandwidth mode from peers. Our
    // mempool will not contain the transactions necessary to reconstruct the
    // compact block.
    if (m_opts.ignore_incoming_txs) return;

    CNodeState* nodestate = State(nodeid);
    if (!nodestate || !nodestate->m_provides_cmpctblocks) {
        // Don't request compact blocks if the peer has not signalled support
        return;
    }

    int num_outbound_hb_peers = 0;
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            // Already an HB peer: move it to the back (most recent) and stop.
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
            return;
        }
        CNodeState *state = State(*it);
        if (state != nullptr && !state->m_is_inbound) ++num_outbound_hb_peers;
    }
    if (nodestate->m_is_inbound) {
        // If we're adding an inbound HB peer, make sure we're not removing
        // our last outbound HB peer in the process.
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
            CNodeState *remove_node = State(lNodesAnnouncingHeaderAndIDs.front());
            if (remove_node != nullptr && !remove_node->m_is_inbound) {
                // Put the HB outbound peer in the second slot, so that it
                // doesn't get removed.
                std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
            }
        }
    }
    // NOTE(review): a line of the original (line 1421, inside this lambda's
    // opening) is not visible in this extraction.
    m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){
                MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION);
                // save BIP152 bandwidth state: we select peer to be low-bandwidth
                pnodeStop->m_bip152_highbandwidth_to = false;
                return true;
            });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        MakeAndPushMessage(*pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION);
        // save BIP152 bandwidth state: we select peer to be high-bandwidth
        pfrom->m_bip152_highbandwidth_to = true;
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
        return true;
    });
}
1440
/** Heuristic for whether our tip may have gone stale: no tip update for more
 *  than 3 block intervals AND no block downloads currently in flight.
 *  Initializes m_last_tip_update lazily on first call.
 *  NOTE(review): one line of the original body (line 1443) is not visible in
 *  this extraction. */
bool PeerManagerImpl::TipMayBeStale()
{
    const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
    if (m_last_tip_update.load() == 0s) {
        // First call: treat "now" as the last update so we don't immediately
        // report a stale tip at startup.
        m_last_tip_update = GetTime<std::chrono::seconds>();
    }
    return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty();
}
1450
1451int64_t PeerManagerImpl::ApproximateBestBlockDepth() const
1452{
1453 return (GetTime<std::chrono::seconds>() - m_best_block_time.load()).count() / m_chainparams.GetConsensus().nPowTargetSpacing;
1454}
1455
1456bool PeerManagerImpl::CanDirectFetch()
1457{
1458 return m_chainman.ActiveChain().Tip()->Time() > NodeClock::now() - m_chainparams.GetConsensus().PowTargetSpacing() * 20;
1459}
1460
1461static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1462{
1463 if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
1464 return true;
1465 if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
1466 return true;
1467 return false;
1468}
1469
/** Try to resolve the peer's previously-announced unknown block hash against
 *  our block index, and if found (with nonzero work), use it to update the
 *  peer's best known block. */
void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (!state->hashLastUnknownBlock.IsNull()) {
        const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
        if (pindex && pindex->nChainWork > 0) {
            // The previously unknown block is now in our index; promote it to
            // best-known if it has at least as much work as the current one.
            if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                state->pindexBestKnownBlock = pindex;
            }
            state->hashLastUnknownBlock.SetNull();
        }
    }
}
1484
/** Update tracking of which blocks a peer is assumed to have, based on a
 *  block hash it announced. Known hashes with work update pindexBestKnownBlock;
 *  unknown hashes are remembered in hashLastUnknownBlock for later resolution. */
void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // First resolve any outstanding unknown-hash announcement.
    ProcessBlockAvailability(nodeid);

    const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
    if (pindex && pindex->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
            state->pindexBestKnownBlock = pindex;
        }
    } else {
        // An unknown block was announced; just assume that the latest one is the best one.
        state->hashLastUnknownBlock = hash;
    }
}
1502
// Logic for calculating which blocks to download from a given peer, given our current tip.
/** Fill vBlocks with up to `count` blocks to request from this peer, walking
 *  forward from the last block we have in common with it, bounded by
 *  BLOCK_DOWNLOAD_WINDOW. nodeStaller is set (via FindNextBlocks) if the
 *  window is exhausted because another peer is stalling us. */
void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
{
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(peer.m_id);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(peer.m_id);

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
        // This peer has nothing interesting.
        return;
    }

    // When we sync with AssumeUtxo and discover the snapshot is not in the peer's best chain, abort:
    // We can't reorg to this chain due to missing undo data until the background sync has finished,
    // so downloading blocks from it would be futile.
    const CBlockIndex* snap_base{m_chainman.GetSnapshotBaseBlock()};
    if (snap_base && state->pindexBestKnownBlock->GetAncestor(snap_base->nHeight) != snap_base) {
        LogDebug(BCLog::NET, "Not downloading blocks from peer=%d, which doesn't have the snapshot block in its best chain.\n", peer.m_id);
        return;
    }

    // Bootstrap quickly by guessing a parent of our best tip is the forking point.
    // Guessing wrong in either direction is not a problem.
    // Also reset pindexLastCommonBlock after a snapshot was loaded, so that blocks after the snapshot will be prioritised for download.
    if (state->pindexLastCommonBlock == nullptr ||
        (snap_base && state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
        state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;

    FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller);
}
1552
/** Fill vBlocks with historical (pre-snapshot) blocks to request from this
 *  peer for the assumeutxo background sync, walking from from_tip towards
 *  target_block. No-op if the peer cannot provide the full chain up to
 *  target_block. */
void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block)
{
    Assert(from_tip);
    Assert(target_block);

    if (vBlocks.size() >= count) {
        // Caller's request budget is already used up.
        return;
    }

    vBlocks.reserve(count);
    CNodeState *state = Assert(State(peer.m_id));

    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) {
        // This peer can't provide us the complete series of blocks leading up to the
        // assumeutxo snapshot base.
        //
        // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we
        // will eventually crash when we try to reorg to it. Let other logic
        // deal with whether we disconnect this peer.
        //
        // TODO at some point in the future, we might choose to request what blocks
        // this peer does have from the historical chain, despite it not having a
        // complete history beneath the snapshot base.
        return;
    }

    // Window is capped at the snapshot base: we never need blocks past target_block here.
    FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight));
}
1581
/** Core block-download walk shared by FindNextBlocksToDownload and
 *  TryDownloadingHistoricalBlocks: starting at pindexWalk, collect up to
 *  `count` blocks (within nWindowEnd) that this peer can serve and that are
 *  neither downloaded nor in flight. If activeChain is given,
 *  pindexLastCommonBlock is advanced along the way; if nodeStaller is given,
 *  it receives the peer currently blocking the window when we can fetch
 *  nothing. */
void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller)
{
    std::vector<const CBlockIndex*> vToFetch;
    // +1 past the window so stalling can be detected (see caller's comment).
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    bool is_limited_peer = IsLimitedPeer(peer);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        // Fill vToFetch back-to-front by following pprev from the ancestor.
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the meantime, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (const CBlockIndex* pindex : vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }

            if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) {
                // We wouldn't download this block or its descendants from this peer.
                return;
            }

            if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) {
                if (activeChain && pindex->HaveNumChainTxs()) {
                    state->pindexLastCommonBlock = pindex;
                }
                continue;
            }

            // Is block in-flight?
            if (IsBlockRequested(pindex->GetBlockHash())) {
                if (waitingfor == -1) {
                    // This is the first already-in-flight block.
                    waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first;
                }
                continue;
            }

            // The block is not already downloaded, and not yet in flight.
            if (pindex->nHeight > nWindowEnd) {
                // We reached the end of the window.
                if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
                    // We aren't able to fetch anything, but we would be if the download window was one larger.
                    if (nodeStaller) *nodeStaller = waitingfor;
                }
                return;
            }

            // Don't request blocks that go further than what limited peers can provide
            if (is_limited_peer && (state->pindexBestKnownBlock->nHeight - pindex->nHeight >= static_cast<int>(NODE_NETWORK_LIMITED_MIN_BLOCKS) - 2 /* two blocks buffer for possible races */)) {
                continue;
            }

            vBlocks.push_back(pindex);
            if (vBlocks.size() == count) {
                return;
            }
        }
    }
}
1653
1654} // namespace
1655
/** Send our VERSION message to a newly connected peer, advertising our
 *  services, best height and tx-relay preference. The peer's address is only
 *  echoed back when it is routable, non-proxied and addrv1-compatible. */
void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
{
    uint64_t my_services{peer.m_our_services};
    const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
    uint64_t nonce = pnode.GetLocalNonce();
    const int nNodeStartingHeight{m_best_height};
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;

    // Only echo the peer's own address when it's meaningful to do so.
    CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService();
    uint64_t your_services{addr.nServices};

    const bool tx_relay{!RejectIncomingTxs(pnode)};
    MakeAndPushMessage(pnode, NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
            your_services, CNetAddr::V1(addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime)
            my_services, CNetAddr::V1(CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime)
            nonce, strSubVersion, nNodeStartingHeight, tx_relay);

    // Only log the peer's address when -logips is enabled.
    if (fLogIPs) {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid);
    } else {
        LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
    }
}
1680
/** Register a transaction announcement from a peer with the request tracker,
 *  computing a request delay based on preference, txid-vs-wtxid relay, and
 *  how loaded the peer already is. Announcements beyond
 *  MAX_PEER_TX_ANNOUNCEMENTS from non-Relay-permissioned peers are dropped. */
void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
{
    AssertLockHeld(::cs_main); // for State
    AssertLockHeld(m_tx_download_mutex); // For m_txrequest
    NodeId nodeid = node.GetId();
    if (!node.HasPermission(NetPermissionFlags::Relay) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) {
        // Too many queued announcements from this peer
        return;
    }
    const CNodeState* state = State(nodeid);

    // Decide the TxRequestTracker parameters for this announcement:
    // - "preferred": if fPreferredDownload is set (= outbound, or NetPermissionFlags::NoBan permission)
    // - "reqtime": current time plus delays for:
    //   - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections
    //   - TXID_RELAY_DELAY for txid announcements while wtxid peers are available
    //   - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least
    //     MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have NetPermissionFlags::Relay).
    auto delay{0us};
    const bool preferred = state->fPreferredDownload;
    if (!preferred) delay += NONPREF_PEER_TX_DELAY;
    if (!gtxid.IsWtxid() && m_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
    const bool overloaded = !node.HasPermission(NetPermissionFlags::Relay) &&
        m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT;
    if (overloaded) delay += OVERLOADED_PEER_TX_DELAY;
    m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay);
}
1708
1709void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
1710{
1711 LOCK(cs_main);
1712 CNodeState *state = State(node);
1713 if (state) state->m_last_block_announcement = time_in_seconds;
1714}
1715
/** Set up per-peer state when a connection is established: a CNodeState entry
 *  (under cs_main) and a Peer entry (under m_peer_mutex).
 *  NOTE(review): the `if (...)` condition guarding the NODE_BLOOM addition
 *  (original line 1728) is not visible in this extraction; the orphan brace
 *  below closes it. */
void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_services)
{
    NodeId nodeid = node.GetId();
    {
        LOCK(cs_main); // For m_node_states
        m_node_states.emplace_hint(m_node_states.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(node.IsInboundConn()));
    }
    {
        LOCK(m_tx_download_mutex);
        // A fresh peer must have no tracked tx announcements yet.
        assert(m_txrequest.Count(nodeid) == 0);
    }

        our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM);
    }

    PeerRef peer = std::make_shared<Peer>(nodeid, our_services);
    {
        LOCK(m_peer_mutex);
        m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
    }
}
1738
1739void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
1740{
1741 std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
1742
1743 for (const auto& txid : unbroadcast_txids) {
1744 CTransactionRef tx = m_mempool.get(txid);
1745
1746 if (tx != nullptr) {
1747 RelayTransaction(txid, tx->GetWitnessHash());
1748 } else {
1749 m_mempool.RemoveUnbroadcastTx(txid, true);
1750 }
1751 }
1752
1753 // Schedule next run for 10-15 minutes in the future.
1754 // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
1755 const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
1756 scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1757}
1758
/** Tear down all per-peer state when a connection closes: Peer map entry,
 *  in-flight block requests, orphan/tx-request tracking, reconciliation state,
 *  aggregate counters, headers-presync stats, and (for full outbound peers)
 *  addrman's connected-time bookkeeping. Runs global consistency asserts once
 *  the last peer is gone. */
void PeerManagerImpl::FinalizeNode(const CNode& node)
{
    NodeId nodeid = node.GetId();
    {
    LOCK(cs_main);
    {
        // We remove the PeerRef from g_peer_map here, but we don't always
        // destruct the Peer. Sometimes another thread is still holding a
        // PeerRef, so the refcount is >= 1. Be careful not to do any
        // processing here that assumes Peer won't be changed before it's
        // destructed.
        PeerRef peer = RemovePeer(nodeid);
        assert(peer != nullptr);
        m_wtxid_relay_peers -= peer->m_wtxid_relay;
        assert(m_wtxid_relay_peers >= 0);
    }
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (state->fSyncStarted)
        nSyncStarted--;

    // Remove only THIS peer's entries from the global in-flight multimap;
    // other peers may still have requests for the same blocks.
    for (const QueuedBlock& entry : state->vBlocksInFlight) {
        auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
        while (range.first != range.second) {
            auto [node_id, list_it] = range.first->second;
            if (node_id != nodeid) {
                range.first++;
            } else {
                range.first = mapBlocksInFlight.erase(range.first);
            }
        }
    }
    {
        LOCK(m_tx_download_mutex);
        m_orphanage.EraseForPeer(nodeid);
        m_txrequest.DisconnectedPeer(nodeid);
    }
    if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid);
    // Keep the aggregate counters in sync with this peer's contributions.
    m_num_preferred_download_peers -= state->fPreferredDownload;
    m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
    assert(m_peers_downloading_from >= 0);
    m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
    assert(m_outbound_peers_with_protect_from_disconnect >= 0);

    m_node_states.erase(nodeid);

    if (m_node_states.empty()) {
        // Do a consistency check after the last peer is removed.
        assert(mapBlocksInFlight.empty());
        assert(m_num_preferred_download_peers == 0);
        assert(m_peers_downloading_from == 0);
        assert(m_outbound_peers_with_protect_from_disconnect == 0);
        assert(m_wtxid_relay_peers == 0);
        LOCK(m_tx_download_mutex);
        assert(m_txrequest.Size() == 0);
        assert(m_orphanage.Size() == 0);
    }
    } // cs_main
    if (node.fSuccessfullyConnected &&
        !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
        // Only change visible addrman state for full outbound peers. We don't
        // call Connected() for feeler connections since they don't have
        // fSuccessfullyConnected set.
        m_addrman.Connected(node.addr);
    }
    {
        LOCK(m_headers_presync_mutex);
        m_headers_presync_stats.erase(nodeid);
    }
    LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
1831
1832bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const
1833{
1834 // Shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)
1835 return !(GetDesirableServiceFlags(services) & (~services));
1836}
1837
/** Compute the set of service bits we want a peer to advertise, which depends
 *  on how far behind the network tip we estimate ourselves to be.
 *  NOTE(review): both return statements of the original body (lines 1843 and
 *  1846) are not visible in this extraction; only the control structure
 *  remains below. */
ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const
{
    if (services & NODE_NETWORK_LIMITED) {
        // Limited peers are desirable when we are close to the tip.
        if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) {
            // NOTE(review): return for the near-tip limited-peer case elided here.
        }
    }
    // NOTE(review): the function's fallback return is likewise elided here.
}
1848
1849PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const
1850{
1851 LOCK(m_peer_mutex);
1852 auto it = m_peer_map.find(id);
1853 return it != m_peer_map.end() ? it->second : nullptr;
1854}
1855
1856PeerRef PeerManagerImpl::RemovePeer(NodeId id)
1857{
1858 PeerRef ret;
1859 LOCK(m_peer_mutex);
1860 auto it = m_peer_map.find(id);
1861 if (it != m_peer_map.end()) {
1862 ret = std::move(it->second);
1863 m_peer_map.erase(it);
1864 }
1865 return ret;
1866}
1867
/** Populate `stats` with a snapshot of this peer's sync/relay state, pulling
 *  from both the cs_main-guarded CNodeState and the lock-free/atomic Peer
 *  fields. Returns false if the peer is unknown in either structure. */
bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const
{
    {
        LOCK(cs_main);
        const CNodeState* state = State(nodeid);
        if (state == nullptr)
            return false;
        stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
        stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
        for (const QueuedBlock& queue : state->vBlocksInFlight) {
            if (queue.pindex)
                stats.vHeightInFlight.push_back(queue.pindex->nHeight);
        }
    }

    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) return false;
    stats.their_services = peer->m_their_services;
    stats.m_starting_height = peer->m_starting_height;
    // It is common for nodes with good ping times to suddenly become lagged,
    // due to a new block arriving or other large transfer.
    // Merely reporting pingtime might fool the caller into thinking the node was still responsive,
    // since pingtime does not update until the ping is complete, which might take a while.
    // So, if a ping is taking an unusually long time in flight,
    // the caller can immediately detect that this is happening.
    auto ping_wait{0us};
    if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) {
        ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
    }

    if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
        stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs);
        stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
    } else {
        // Peer has no tx-relay data structure (e.g. block-relay-only connection).
        stats.m_relay_txs = false;
        stats.m_fee_filter_received = 0;
    }

    stats.m_ping_wait = ping_wait;
    stats.m_addr_processed = peer->m_addr_processed.load();
    stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
    stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
    {
        LOCK(peer->m_headers_sync_mutex);
        if (peer->m_headers_sync) {
            stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
        }
    }
    stats.time_offset = peer->m_time_offset;

    return true;
}
1920
1921PeerManagerInfo PeerManagerImpl::GetInfo() const
1922{
1923 return PeerManagerInfo{
1924 .median_outbound_time_offset = m_outbound_time_offsets.Median(),
1925 .ignores_incoming_txs = m_opts.ignore_incoming_txs,
1926 };
1927}
1928
1929void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
1930{
1931 if (m_opts.max_extra_txs <= 0)
1932 return;
1933 if (!vExtraTxnForCompact.size())
1934 vExtraTxnForCompact.resize(m_opts.max_extra_txs);
1935 vExtraTxnForCompact[vExtraTxnForCompactIt] = tx;
1936 vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
1937}
1938
1939void PeerManagerImpl::Misbehaving(Peer& peer, const std::string& message)
1940{
1941 LOCK(peer.m_misbehavior_mutex);
1942
1943 const std::string message_prefixed = message.empty() ? "" : (": " + message);
1944 peer.m_should_discourage = true;
1945 LogPrint(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id, message_prefixed);
1946}
1947
/** Decide whether to discourage a peer based on a block validation result,
 *  exempting compact-block relayers and (for bad-chain results) inbound peers.
 *  NOTE(review): the `case ...:` labels of this switch (and fallthrough
 *  annotations, if any) are not visible in this extraction; only the case
 *  bodies remain below. */
void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
                                              bool via_compact_block, const std::string& message)
{
    PeerRef peer{GetPeerRef(nodeid)};
    switch (state.GetResult()) {
        break;
        // We didn't try to process the block because the header chain may have
        // too little work.
        break;
    // The node is providing invalid data:
        if (!via_compact_block) {
            if (peer) Misbehaving(*peer, message);
            return;
        }
        break;
        {
            LOCK(cs_main);
            CNodeState *node_state = State(nodeid);
            if (node_state == nullptr) {
                break;
            }

            // Discourage outbound (but not inbound) peers if on an invalid chain.
            // Exempt HB compact block peers. Manual connections are always protected from discouragement.
            if (!via_compact_block && !node_state->m_is_inbound) {
                if (peer) Misbehaving(*peer, message);
                return;
            }
            break;
        }
        if (peer) Misbehaving(*peer, message);
        return;
    // Conflicting (but not necessarily invalid) data or different policy:
        if (peer) Misbehaving(*peer, message);
        return;
        break;
    }
    if (message != "") {
        // Log the reason even when no punishment was applied.
        LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
    }
}
2000
/** Decide whether to discourage a peer based on a transaction validation
 *  result; only consensus-invalid data triggers Misbehaving.
 *  NOTE(review): the `case ...:` labels of this switch are not visible in
 *  this extraction; only the case bodies remain below. */
void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
{
    PeerRef peer{GetPeerRef(nodeid)};
    switch (state.GetResult()) {
        break;
    // The node is providing invalid data:
        if (peer) Misbehaving(*peer, "");
        return;
    // Conflicting (but not necessarily invalid) data or different policy:
        break;
    }
}
2027
/** Whether we are willing to serve this block to peers: any block on the
 *  active chain, or a fully-validated off-chain block that is recent (within
 *  STALE_RELAY_AGE_LIMIT of our best header, in both wall time and
 *  proof-equivalent time). Limits fingerprinting via requests for old forks.
 *  NOTE(review): one line of the original body (line 2030) is not visible in
 *  this extraction. */
bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
{
    if (m_chainman.ActiveChain().Contains(pindex)) return true;
    return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) &&
           (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
           (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
}
2036
/** Manually request a specific block (with witness data) from a specific
 *  peer, clearing any prior in-flight requests for it first. Returns an error
 *  string on failure, std::nullopt on success. */
std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
{
    if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ...";

    // Ensure this peer exists and hasn't been disconnected
    PeerRef peer = GetPeerRef(peer_id);
    if (peer == nullptr) return "Peer does not exist";

    // Ignore pre-segwit peers
    if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer";

    LOCK(cs_main);

    // Forget about all prior requests
    RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);

    // Mark block as in-flight
    if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";

    // Construct message to request the block
    const uint256& hash{block_index.GetBlockHash()};
    std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)};

    // Send block request message to the peer
    bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) {
        this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs);
        return true;
    });

    if (!success) return "Peer not fully connected";

    LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
             hash.ToString(), peer_id);
    return std::nullopt;
}
2072
2073std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman,
2074 BanMan* banman, ChainstateManager& chainman,
2075 CTxMemPool& pool, node::Warnings& warnings, Options opts)
2076{
2077 return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, warnings, opts);
2078}
2079
/** Construct the peer manager, wiring in the subsystems it coordinates and
 *  optionally enabling Erlay transaction reconciliation. */
PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman,
                                 BanMan* banman, ChainstateManager& chainman,
                                 CTxMemPool& pool, node::Warnings& warnings, Options opts)
    : m_rng{opts.deterministic_rng},
      m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng},
      m_chainparams(chainman.GetParams()),
      m_connman(connman),
      m_addrman(addrman),
      m_banman(banman),
      m_chainman(chainman),
      m_mempool(pool),
      m_warnings{warnings},
      m_opts{opts}
{
    // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation.
    // This argument can go away after Erlay support is complete.
    if (opts.reconcile_txs) {
        m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION);
    }
}
2100
/** Register the peer manager's recurring background jobs on the scheduler:
 *  combined stale-tip/peer-eviction checks, and re-broadcast of our own
 *  unannounced transactions. */
void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
{
    // Stale tip checking and peer eviction are on two different timers, but we
    // don't want them to get out of sync due to drift in the scheduler, so we
    // combine them in one function and schedule at the quicker (peer-eviction)
    // timer.
    static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
    scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});

    // schedule next run for 10-15 minutes in the future
    const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
}
2114
/** Validation-interface callback for active chain tip changes. Outside of IBD,
 *  resets the recent-rejects filters so previously rejected transactions (e.g.
 *  timelocked ones) get another chance after a new tip. */
void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd)
{
    // Ensure mempool mutex was released, otherwise deadlock may occur if another thread holding
    // m_tx_download_mutex waits on the mempool mutex.
    AssertLockNotHeld(m_mempool.cs);
    AssertLockNotHeld(m_tx_download_mutex);

    if (!is_ibd) {
        LOCK(m_tx_download_mutex);
        // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due
        // to a timelock. Reset the rejection filters to give those transactions another chance if we
        // see them again.
        RecentRejectsFilter().reset();
        RecentRejectsReconsiderableFilter().reset();
    }
}
2131
/**
 * Validation-interface callback fired whenever a block is connected to a
 * chainstate (active or background). Refreshes the stale-tip timestamp,
 * relaxes the dynamic block-stalling timeout, and retires the block's
 * transactions from orphanage / request tracking.
 */
void PeerManagerImpl::BlockConnected(
    ChainstateRole role,
    const std::shared_ptr<const CBlock>& pblock,
    const CBlockIndex* pindex)
{
    // Update this for all chainstate roles so that we don't mistakenly see peers
    // helping us do background IBD as having a stale tip.
    m_last_tip_update = GetTime<std::chrono::seconds>();

    // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
    auto stalling_timeout = m_block_stalling_timeout.load();
    Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
    if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
        const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
        // compare_exchange_strong: only shrink if no other thread changed the
        // timeout since we loaded it above; otherwise skip this round.
        if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
            LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
        }
    }

    // The following task can be skipped since we don't maintain a mempool for
    // the ibd/background chainstate.
    if (role == ChainstateRole::BACKGROUND) {
        return;
    }
    LOCK(m_tx_download_mutex);
    // Transactions confirmed in this block can no longer be orphans.
    m_orphanage.EraseForBlock(*pblock);

    for (const auto& ptx : pblock->vtx) {
        // Remember both txid and wtxid of confirmed transactions so that
        // re-announcements shortly after confirmation are ignored, and drop
        // any outstanding download requests for them.
        RecentConfirmedTransactionsFilter().insert(ptx->GetHash().ToUint256());
        if (ptx->HasWitness()) {
            RecentConfirmedTransactionsFilter().insert(ptx->GetWitnessHash().ToUint256());
        }
        m_txrequest.ForgetTxHash(ptx->GetHash());
        m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
    }
}
2174
// Validation-interface callback fired when a block is disconnected (reorg).
void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
{
    // To avoid relay problems with transactions that were previously
    // confirmed, clear our filter of recently confirmed transactions whenever
    // there's a reorg.
    // This means that in a 1-block reorg (where 1 block is disconnected and
    // then another block reconnected), our filter will drop to having only one
    // block's worth of transactions in it, but that should be fine, since
    // presumably the most common case of relaying a confirmed transaction
    // should be just after a new block containing it is found.
    LOCK(m_tx_download_mutex); // filter is guarded by m_tx_download_mutex
    RecentConfirmedTransactionsFilter().reset();
}
2188
2193void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock)
2194{
2195 auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock, FastRandomContext().rand64());
2196
2197 LOCK(cs_main);
2198
2199 if (pindex->nHeight <= m_highest_fast_announce)
2200 return;
2201 m_highest_fast_announce = pindex->nHeight;
2202
2203 if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return;
2204
2205 uint256 hashBlock(pblock->GetHash());
2206 const std::shared_future<CSerializedNetMsg> lazy_ser{
2207 std::async(std::launch::deferred, [&] { return NetMsg::Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })};
2208
2209 {
2210 auto most_recent_block_txs = std::make_unique<std::map<uint256, CTransactionRef>>();
2211 for (const auto& tx : pblock->vtx) {
2212 most_recent_block_txs->emplace(tx->GetHash(), tx);
2213 most_recent_block_txs->emplace(tx->GetWitnessHash(), tx);
2214 }
2215
2216 LOCK(m_most_recent_block_mutex);
2217 m_most_recent_block_hash = hashBlock;
2218 m_most_recent_block = pblock;
2219 m_most_recent_compact_block = pcmpctblock;
2220 m_most_recent_block_txs = std::move(most_recent_block_txs);
2221 }
2222
2223 m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
2225
2227 return;
2228 ProcessBlockAvailability(pnode->GetId());
2229 CNodeState &state = *State(pnode->GetId());
2230 // If the peer has, or we announced to them the previous block already,
2231 // but we don't think they have this one, go ahead and announce it
2232 if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
2233
2234 LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
2235 hashBlock.ToString(), pnode->GetId());
2236
2237 const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()};
2238 PushMessage(*pnode, ser_cmpctblock.Copy());
2239 state.pindexBestHeaderSent = pindex;
2240 }
2241 });
2242}
2243
2248void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
2249{
2250 SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()});
2251
2252 // Don't relay inventory during initial block download.
2253 if (fInitialDownload) return;
2254
2255 // Find the hashes of all blocks that weren't previously in the best chain.
2256 std::vector<uint256> vHashes;
2257 const CBlockIndex *pindexToAnnounce = pindexNew;
2258 while (pindexToAnnounce != pindexFork) {
2259 vHashes.push_back(pindexToAnnounce->GetBlockHash());
2260 pindexToAnnounce = pindexToAnnounce->pprev;
2261 if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
2262 // Limit announcements in case of a huge reorganization.
2263 // Rely on the peer's synchronization mechanism in that case.
2264 break;
2265 }
2266 }
2267
2268 {
2269 LOCK(m_peer_mutex);
2270 for (auto& it : m_peer_map) {
2271 Peer& peer = *it.second;
2272 LOCK(peer.m_block_inv_mutex);
2273 for (const uint256& hash : vHashes | std::views::reverse) {
2274 peer.m_blocks_for_headers_relay.push_back(hash);
2275 }
2276 }
2277 }
2278
2279 m_connman.WakeMessageHandler();
2280}
2281
/**
 * Validation-interface callback fired once a block has been fully checked.
 * Punishes the source peer for an invalid block, or — for a valid block near
 * the tip — considers upgrading that peer to high-bandwidth compact-block
 * announcements.
 */
void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationState& state)
{
    LOCK(cs_main);

    const uint256 hash(block.GetHash());
    // mapBlockSource records which peer sent us the block, and (second bool)
    // whether punishment may apply for it.
    std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);

    // If the block failed validation, we know where it came from and we're still connected
    // to that peer, maybe punish.
    if (state.IsInvalid() &&
        it != mapBlockSource.end() &&
        State(it->second.first)) {
            MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
    }
    // Check that:
    // 1. The block is valid
    // 2. We're not in initial block download
    // 3. This is currently the best block we're aware of. We haven't updated
    //    the tip yet so we have no way to check this directly here. Instead we
    //    just check that there are currently no other blocks in flight.
    else if (state.IsValid() &&
             !m_chainman.IsInitialBlockDownload() &&
             mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
        if (it != mapBlockSource.end()) {
            MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
        }
    }
    // The source entry has served its purpose either way.
    if (it != mapBlockSource.end())
        mapBlockSource.erase(it);
}
2316
2318//
2319// Messages
2320//
2321
2322
2323bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable)
2324{
2325 AssertLockHeld(m_tx_download_mutex);
2326
2327 const uint256& hash = gtxid.GetHash();
2328
2329 if (gtxid.IsWtxid()) {
2330 // Normal query by wtxid.
2331 if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true;
2332 } else {
2333 // Never query by txid: it is possible that the transaction in the orphanage has the same
2334 // txid but a different witness, which would give us a false positive result. If we decided
2335 // not to request the transaction based on this result, an attacker could prevent us from
2336 // downloading a transaction by intentionally creating a malleated version of it. While
2337 // only one (or none!) of these transactions can ultimately be confirmed, we have no way of
2338 // discerning which one that is, so the orphanage can store multiple transactions with the
2339 // same txid.
2340 //
2341 // While we won't query by txid, we can try to "guess" what the wtxid is based on the txid.
2342 // A non-segwit transaction's txid == wtxid. Query this txid "casted" to a wtxid. This will
2343 // help us find non-segwit transactions, saving bandwidth, and should have no false positives.
2344 if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true;
2345 }
2346
2347 if (include_reconsiderable && RecentRejectsReconsiderableFilter().contains(hash)) return true;
2348
2349 if (RecentConfirmedTransactionsFilter().contains(hash)) return true;
2350
2351 return RecentRejectsFilter().contains(hash) || m_mempool.exists(gtxid);
2352}
2353
2354bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
2355{
2356 return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
2357}
2358
2359void PeerManagerImpl::SendPings()
2360{
2361 LOCK(m_peer_mutex);
2362 for(auto& it : m_peer_map) it.second->m_ping_queued = true;
2363}
2364
2365void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid)
2366{
2367 LOCK(m_peer_mutex);
2368 for(auto& it : m_peer_map) {
2369 Peer& peer = *it.second;
2370 auto tx_relay = peer.GetTxRelay();
2371 if (!tx_relay) continue;
2372
2373 LOCK(tx_relay->m_tx_inventory_mutex);
2374 // Only queue transactions for announcement once the version handshake
2375 // is completed. The time of arrival for these transactions is
2376 // otherwise at risk of leaking to a spy, if the spy is able to
2377 // distinguish transactions received during the handshake from the rest
2378 // in the announcement.
2379 if (tx_relay->m_next_inv_send_time == 0s) continue;
2380
2381 const uint256& hash{peer.m_wtxid_relay ? wtxid : txid};
2382 if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) {
2383 tx_relay->m_tx_inventory_to_send.insert(hash);
2384 }
2385 };
2386}
2387
2388void PeerManagerImpl::RelayAddress(NodeId originator,
2389 const CAddress& addr,
2390 bool fReachable)
2391{
2392 // We choose the same nodes within a given 24h window (if the list of connected
2393 // nodes does not change) and we don't relay to nodes that already know an
2394 // address. So within 24h we will likely relay a given address once. This is to
2395 // prevent a peer from unjustly giving their address better propagation by sending
2396 // it to us repeatedly.
2397
2398 if (!fReachable && !addr.IsRelayable()) return;
2399
2400 // Relay to a limited number of other nodes
2401 // Use deterministic randomness to send to the same nodes for 24 hours
2402 // at a time so the m_addr_knowns of the chosen nodes prevent repeats
2403 const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
2404 const auto current_time{GetTime<std::chrono::seconds>()};
2405 // Adding address hash makes exact rotation time different per address, while preserving periodicity.
2406 const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
2408 .Write(hash_addr)
2409 .Write(time_addr)};
2410
2411 // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
2412 unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
2413
2414 std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}};
2415 assert(nRelayNodes <= best.size());
2416
2417 LOCK(m_peer_mutex);
2418
2419 for (auto& [id, peer] : m_peer_map) {
2420 if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) {
2421 uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
2422 for (unsigned int i = 0; i < nRelayNodes; i++) {
2423 if (hashKey > best[i].first) {
2424 std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
2425 best[i] = std::make_pair(hashKey, peer.get());
2426 break;
2427 }
2428 }
2429 }
2430 };
2431
2432 for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
2433 PushAddress(*best[i].second, addr);
2434 }
2435}
2436
2437void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
2438{
2439 std::shared_ptr<const CBlock> a_recent_block;
2440 std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
2441 {
2442 LOCK(m_most_recent_block_mutex);
2443 a_recent_block = m_most_recent_block;
2444 a_recent_compact_block = m_most_recent_compact_block;
2445 }
2446
2447 bool need_activate_chain = false;
2448 {
2449 LOCK(cs_main);
2450 const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
2451 if (pindex) {
2452 if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
2453 pindex->IsValid(BLOCK_VALID_TREE)) {
2454 // If we have the block and all of its parents, but have not yet validated it,
2455 // we might be in the middle of connecting it (ie in the unlock of cs_main
2456 // before ActivateBestChain but after AcceptBlock).
2457 // In this case, we need to run ActivateBestChain prior to checking the relay
2458 // conditions below.
2459 need_activate_chain = true;
2460 }
2461 }
2462 } // release cs_main before calling ActivateBestChain
2463 if (need_activate_chain) {
2465 if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
2466 LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
2467 }
2468 }
2469
2470 const CBlockIndex* pindex{nullptr};
2471 const CBlockIndex* tip{nullptr};
2472 bool can_direct_fetch{false};
2473 FlatFilePos block_pos{};
2474 {
2475 LOCK(cs_main);
2476 pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
2477 if (!pindex) {
2478 return;
2479 }
2480 if (!BlockRequestAllowed(pindex)) {
2481 LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
2482 return;
2483 }
2484 // disconnect node in case we have reached the outbound limit for serving historical blocks
2485 if (m_connman.OutboundTargetReached(true) &&
2486 (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
2487 !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
2488 ) {
2489 LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
2490 pfrom.fDisconnect = true;
2491 return;
2492 }
2493 tip = m_chainman.ActiveChain().Tip();
2494 // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
2496 (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (tip->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
2497 )) {
2498 LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, disconnect peer=%d\n", pfrom.GetId());
2499 //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
2500 pfrom.fDisconnect = true;
2501 return;
2502 }
2503 // Pruned nodes may have deleted the block, so check whether
2504 // it's available before trying to send.
2505 if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
2506 return;
2507 }
2508 can_direct_fetch = CanDirectFetch();
2509 block_pos = pindex->GetBlockPos();
2510 }
2511
2512 std::shared_ptr<const CBlock> pblock;
2513 if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
2514 pblock = a_recent_block;
2515 } else if (inv.IsMsgWitnessBlk()) {
2516 // Fast-path: in this case it is possible to serve the block directly from disk,
2517 // as the network format matches the format on disk
2518 std::vector<uint8_t> block_data;
2519 if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data, block_pos)) {
2520 if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
2521 LogPrint(BCLog::NET, "Block was pruned before it could be read, disconnect peer=%s\n", pfrom.GetId());
2522 } else {
2523 LogError("Cannot load block from disk, disconnect peer=%d\n", pfrom.GetId());
2524 }
2525 pfrom.fDisconnect = true;
2526 return;
2527 }
2528 MakeAndPushMessage(pfrom, NetMsgType::BLOCK, Span{block_data});
2529 // Don't set pblock as we've sent the block
2530 } else {
2531 // Send block from disk
2532 std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
2533 if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, block_pos)) {
2534 if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
2535 LogPrint(BCLog::NET, "Block was pruned before it could be read, disconnect peer=%s\n", pfrom.GetId());
2536 } else {
2537 LogError("Cannot load block from disk, disconnect peer=%d\n", pfrom.GetId());
2538 }
2539 pfrom.fDisconnect = true;
2540 return;
2541 }
2542 pblock = pblockRead;
2543 }
2544 if (pblock) {
2545 if (inv.IsMsgBlk()) {
2546 MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_NO_WITNESS(*pblock));
2547 } else if (inv.IsMsgWitnessBlk()) {
2548 MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock));
2549 } else if (inv.IsMsgFilteredBlk()) {
2550 bool sendMerkleBlock = false;
2551 CMerkleBlock merkleBlock;
2552 if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
2553 LOCK(tx_relay->m_bloom_filter_mutex);
2554 if (tx_relay->m_bloom_filter) {
2555 sendMerkleBlock = true;
2556 merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
2557 }
2558 }
2559 if (sendMerkleBlock) {
2560 MakeAndPushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock);
2561 // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
2562 // This avoids hurting performance by pointlessly requiring a round-trip
2563 // Note that there is currently no way for a node to request any single transactions we didn't send here -
2564 // they must either disconnect and retry or request the full block.
2565 // Thus, the protocol spec specified allows for us to provide duplicate txn here,
2566 // however we MUST always provide at least what the remote peer needs
2567 typedef std::pair<unsigned int, uint256> PairType;
2568 for (PairType& pair : merkleBlock.vMatchedTxn)
2569 MakeAndPushMessage(pfrom, NetMsgType::TX, TX_NO_WITNESS(*pblock->vtx[pair.first]));
2570 }
2571 // else
2572 // no response
2573 } else if (inv.IsMsgCmpctBlk()) {
2574 // If a peer is asking for old blocks, we're almost guaranteed
2575 // they won't have a useful mempool to match against a compact block,
2576 // and we don't feel like constructing the object for them, so
2577 // instead we respond with the full, non-compact block.
2578 if (can_direct_fetch && pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) {
2579 if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
2580 MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block);
2581 } else {
2582 CBlockHeaderAndShortTxIDs cmpctblock{*pblock, m_rng.rand64()};
2583 MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, cmpctblock);
2584 }
2585 } else {
2586 MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock));
2587 }
2588 }
2589 }
2590
2591 {
2592 LOCK(peer.m_block_inv_mutex);
2593 // Trigger the peer node to send a getblocks request for the next batch of inventory
2594 if (inv.hash == peer.m_continuation_block) {
2595 // Send immediately. This must send even if redundant,
2596 // and we want it right after the last block so they don't
2597 // wait for other stuff first.
2598 std::vector<CInv> vInv;
2599 vInv.emplace_back(MSG_BLOCK, tip->GetBlockHash());
2600 MakeAndPushMessage(pfrom, NetMsgType::INV, vInv);
2601 peer.m_continuation_block.SetNull();
2602 }
2603 }
2604}
2605
2606CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
2607{
2608 // If a tx was in the mempool prior to the last INV for this peer, permit the request.
2609 auto txinfo = m_mempool.info_for_relay(gtxid, tx_relay.m_last_inv_sequence);
2610 if (txinfo.tx) {
2611 return std::move(txinfo.tx);
2612 }
2613
2614 // Or it might be from the most recent block
2615 {
2616 LOCK(m_most_recent_block_mutex);
2617 if (m_most_recent_block_txs != nullptr) {
2618 auto it = m_most_recent_block_txs->find(gtxid.GetHash());
2619 if (it != m_most_recent_block_txs->end()) return it->second;
2620 }
2621 }
2622
2623 return {};
2624}
2625
2626void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
2627{
2629
2630 auto tx_relay = peer.GetTxRelay();
2631
2632 std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
2633 std::vector<CInv> vNotFound;
2634
2635 // Process as many TX items from the front of the getdata queue as
2636 // possible, since they're common and it's efficient to batch process
2637 // them.
2638 while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
2639 if (interruptMsgProc) return;
2640 // The send buffer provides backpressure. If there's no space in
2641 // the buffer, pause processing until the next call.
2642 if (pfrom.fPauseSend) break;
2643
2644 const CInv &inv = *it++;
2645
2646 if (tx_relay == nullptr) {
2647 // Ignore GETDATA requests for transactions from block-relay-only
2648 // peers and peers that asked us not to announce transactions.
2649 continue;
2650 }
2651
2652 CTransactionRef tx = FindTxForGetData(*tx_relay, ToGenTxid(inv));
2653 if (tx) {
2654 // WTX and WITNESS_TX imply we serialize with witness
2655 const auto maybe_with_witness = (inv.IsMsgTx() ? TX_NO_WITNESS : TX_WITH_WITNESS);
2656 MakeAndPushMessage(pfrom, NetMsgType::TX, maybe_with_witness(*tx));
2657 m_mempool.RemoveUnbroadcastTx(tx->GetHash());
2658 } else {
2659 vNotFound.push_back(inv);
2660 }
2661 }
2662
2663 // Only process one BLOCK item per call, since they're uncommon and can be
2664 // expensive to process.
2665 if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
2666 const CInv &inv = *it++;
2667 if (inv.IsGenBlkMsg()) {
2668 ProcessGetBlockData(pfrom, peer, inv);
2669 }
2670 // else: If the first item on the queue is an unknown type, we erase it
2671 // and continue processing the queue on the next call.
2672 }
2673
2674 peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
2675
2676 if (!vNotFound.empty()) {
2677 // Let the peer know that we didn't find what it asked for, so it doesn't
2678 // have to wait around forever.
2679 // SPV clients care about this message: it's needed when they are
2680 // recursively walking the dependencies of relevant unconfirmed
2681 // transactions. SPV clients want to do that because they want to know
2682 // about (and store and rebroadcast and risk analyze) the dependencies
2683 // of transactions relevant to them, without having to download the
2684 // entire memory pool.
2685 // Also, other nodes can use these messages to automatically request a
2686 // transaction from some other peer that announced it, and stop
2687 // waiting for us to respond.
2688 // In normal operation, we often send NOTFOUND messages for parents of
2689 // transactions that we relay; if a peer is missing a parent, they may
2690 // assume we have them and request the parents from us.
2691 MakeAndPushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound);
2692 }
2693}
2694
2695uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const
2696{
2697 uint32_t nFetchFlags = 0;
2698 if (CanServeWitnesses(peer)) {
2699 nFetchFlags |= MSG_WITNESS_FLAG;
2700 }
2701 return nFetchFlags;
2702}
2703
2704void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
2705{
2706 BlockTransactions resp(req);
2707 for (size_t i = 0; i < req.indexes.size(); i++) {
2708 if (req.indexes[i] >= block.vtx.size()) {
2709 Misbehaving(peer, "getblocktxn with out-of-bounds tx indices");
2710 return;
2711 }
2712 resp.txn[i] = block.vtx[req.indexes[i]];
2713 }
2714
2715 MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp);
2716}
2717
2718bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer)
2719{
2720 // Do these headers have proof-of-work matching what's claimed?
2721 if (!HasValidProofOfWork(headers, consensusParams)) {
2722 Misbehaving(peer, "header with invalid proof of work");
2723 return false;
2724 }
2725
2726 // Are these headers connected to each other?
2727 if (!CheckHeadersAreContinuous(headers)) {
2728 Misbehaving(peer, "non-continuous headers sequence");
2729 return false;
2730 }
2731 return true;
2732}
2733
2734arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
2735{
2736 arith_uint256 near_chaintip_work = 0;
2737 LOCK(cs_main);
2738 if (m_chainman.ActiveChain().Tip() != nullptr) {
2739 const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
2740 // Use a 144 block buffer, so that we'll accept headers that fork from
2741 // near our tip.
2742 near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork);
2743 }
2744 return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
2745}
2746
2753void PeerManagerImpl::HandleUnconnectingHeaders(CNode& pfrom, Peer& peer,
2754 const std::vector<CBlockHeader>& headers)
2755{
2756 // Try to fill in the missing headers.
2757 const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};
2758 if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
2759 LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d)\n",
2760 headers[0].GetHash().ToString(),
2761 headers[0].hashPrevBlock.ToString(),
2762 best_header->nHeight,
2763 pfrom.GetId());
2764 }
2765
2766 // Set hashLastUnknownBlock for this peer, so that if we
2767 // eventually get the headers - even from a different peer -
2768 // we can use this peer to download.
2769 WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
2770}
2771
2772bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
2773{
2774 uint256 hashLastBlock;
2775 for (const CBlockHeader& header : headers) {
2776 if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
2777 return false;
2778 }
2779 hashLastBlock = header.GetHash();
2780 }
2781 return true;
2782}
2783
/**
 * Feed a headers message into an in-progress low-work headers sync, if any.
 *
 * Returns true when the message was consumed by the sync machinery (caller
 * should not process the headers further); on success, `headers` is replaced
 * with any headers that have completed PoW validation. Returns false when no
 * sync is in progress or processing failed.
 */
bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
{
    if (peer.m_headers_sync) {
        // A full-sized message signals that the peer may have more headers.
        auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == MAX_HEADERS_RESULTS);
        // If it is a valid continuation, we should treat the existing getheaders request as responded to.
        if (result.success) peer.m_last_getheaders_timestamp = {};
        if (result.request_more) {
            auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
            // If we were instructed to ask for a locator, it should not be empty.
            Assume(!locator.vHave.empty());
            // We can only be instructed to request more if processing was successful.
            Assume(result.success);
            if (!locator.vHave.empty()) {
                // It should be impossible for the getheaders request to fail,
                // because we just cleared the last getheaders timestamp.
                bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
                Assume(sent_getheaders);
                LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
                        locator.vHave.front().ToString(), pfrom.GetId());
            }
        }

        if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
            // Sync finished (successfully or not): drop the state machine.
            peer.m_headers_sync.reset(nullptr);

            // Delete this peer's entry in m_headers_presync_stats.
            // If this is m_headers_presync_bestpeer, it will be replaced later
            // by the next peer that triggers the else{} branch below.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats.erase(pfrom.GetId());
        } else {
            // Build statistics for this peer's sync.
            HeadersPresyncStats stats;
            stats.first = peer.m_headers_sync->GetPresyncWork();
            if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) {
                stats.second = {peer.m_headers_sync->GetPresyncHeight(),
                                peer.m_headers_sync->GetPresyncTime()};
            }

            // Update statistics in stats.
            LOCK(m_headers_presync_mutex);
            m_headers_presync_stats[pfrom.GetId()] = stats;
            auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
            bool best_updated = false;
            if (best_it == m_headers_presync_stats.end()) {
                // If the cached best peer is outdated, iterate over all remaining ones (including
                // newly updated one) to find the best one.
                NodeId peer_best{-1};
                const HeadersPresyncStats* stat_best{nullptr};
                for (const auto& [peer, stat] : m_headers_presync_stats) {
                    if (!stat_best || stat > *stat_best) {
                        peer_best = peer;
                        stat_best = &stat;
                    }
                }
                m_headers_presync_bestpeer = peer_best;
                best_updated = (peer_best == pfrom.GetId());
            } else if (best_it->first == pfrom.GetId() || stats > best_it->second) {
                // pfrom was and remains the best peer, or pfrom just became best.
                m_headers_presync_bestpeer = pfrom.GetId();
                best_updated = true;
            }
            if (best_updated && stats.second.has_value()) {
                // If the best peer updated, and it is in its first phase, signal.
                m_headers_presync_should_signal = true;
            }
        }

        if (result.success) {
            // We only overwrite the headers passed in if processing was
            // successful.
            headers.swap(result.pow_validated_headers);
        }

        return result.success;
    }
    // Either we didn't have a sync in progress, or something went wrong
    // processing these headers, or we are returning headers to the caller to
    // process.
    return false;
}
2865
/**
 * Decide whether a received headers chain has too little work to process
 * directly, and if so start (or decline) a low-work headers sync with the
 * peer.
 *
 * Returns true when the headers were consumed here (caller must not process
 * them further; `headers` is cleared); false when the chain meets the work
 * threshold and normal processing should continue.
 */
bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
{
    // Calculate the claimed total work on this chain.
    arith_uint256 total_work = chain_start_header->nChainWork + CalculateClaimedHeadersWork(headers);

    // Our dynamic anti-DoS threshold (minimum work required on a headers chain
    // before we'll store it)
    arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();

    // Avoid DoS via low-difficulty-headers by only processing if the headers
    // are part of a chain with sufficient work.
    if (total_work < minimum_chain_work) {
        // Only try to sync with this peer if their headers message was full;
        // otherwise they don't have more headers after this so no point in
        // trying to sync their too-little-work chain.
        if (headers.size() == MAX_HEADERS_RESULTS) {
            // Note: we could advance to the last header in this set that is
            // known to us, rather than starting at the first header (which we
            // may already have); however this is unlikely to matter much since
            // ProcessHeadersMessage() already handles the case where all
            // headers in a received message are already known and are
            // ancestors of m_best_header or chainActive.Tip(), by skipping
            // this logic in that case. So even if the first header in this set
            // of headers is known, some header in this set must be new, so
            // advancing to the first unknown header would be a small effect.
            LOCK(peer.m_headers_sync_mutex);
            peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
                chain_start_header, minimum_chain_work));

            // Now a HeadersSyncState object for tracking this synchronization
            // is created, process the headers using it as normal. Failures are
            // handled inside of IsContinuationOfLowWorkHeadersSync.
            (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
        } else {
            LogPrint(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId());
        }

        // The peer has not yet given us a chain that meets our work threshold,
        // so we want to prevent further processing of the headers in any case.
        headers = {};
        return true;
    }

    return false;
}
2911
2912bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header)
2913{
2914 if (header == nullptr) {
2915 return false;
2916 } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) {
2917 return true;
2918 } else if (m_chainman.ActiveChain().Contains(header)) {
2919 return true;
2920 }
2921 return false;
2922}
2923
2924bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
2925{
2926 const auto current_time = NodeClock::now();
2927
2928 // Only allow a new getheaders message to go out if we don't have a recent
2929 // one already in-flight
2930 if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) {
2931 MakeAndPushMessage(pfrom, NetMsgType::GETHEADERS, locator, uint256());
2932 peer.m_last_getheaders_timestamp = current_time;
2933 return true;
2934 }
2935 return false;
2936}
2937
/*
 * Given a new headers tip ending in last_header, potentially request blocks towards that tip.
 * We require that the given tip have at least as much work as our tip, and for
 * our current tip to be "close to synced" (see CanDirectFetch()).
 */
void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header)
{
    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());

    // Only direct-fetch when the announced tip has at least our tip's work and
    // the header passed tree-level validity.
    if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
        std::vector<const CBlockIndex*> vToFetch;
        const CBlockIndex* pindexWalk{&last_header};
        // Calculate all the blocks we'd need to switch to last_header, up to a limit.
        while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
            if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                    !IsBlockRequested(pindexWalk->GetBlockHash()) &&
                    (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) {
                // We don't have this block, and it's not yet in flight (and
                // the peer can serve witnesses if segwit is active at it).
                vToFetch.push_back(pindexWalk);
            }
            pindexWalk = pindexWalk->pprev;
        }
        // If pindexWalk still isn't on our main chain, we're looking at a
        // very large reorg at a time we think we're close to caught up to
        // the main chain -- this shouldn't really happen. Bail out on the
        // direct fetch and rely on parallel download instead.
        if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
            LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                     last_header.GetBlockHash().ToString(),
                     last_header.nHeight);
        } else {
            std::vector<CInv> vGetData;
            // Download as much as possible, from earliest to latest.
            // (vToFetch was filled walking backwards, hence the reverse view.)
            for (const CBlockIndex* pindex : vToFetch | std::views::reverse) {
                if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                    // Can't download any more from this peer
                    break;
                }
                uint32_t nFetchFlags = GetFetchFlags(peer);
                vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
                BlockRequested(pfrom.GetId(), *pindex);
                LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                         pindex->GetBlockHash().ToString(), pfrom.GetId());
            }
            if (vGetData.size() > 1) {
                LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
                         last_header.GetBlockHash().ToString(),
                         last_header.nHeight);
            }
            if (vGetData.size() > 0) {
                // Upgrade the single request to a compact block only when this
                // is the sole block in flight globally and the parent is fully
                // validated (so the compact block can be reconstructed).
                if (!m_opts.ignore_incoming_txs &&
                        nodestate->m_provides_cmpctblocks &&
                        vGetData.size() == 1 &&
                        mapBlocksInFlight.size() == 1 &&
                        last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) {
                    // In any case, we want to download using a compact block, not a regular one
                    vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                }
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vGetData);
            }
        }
    }
}
3002
/** Update peer-tracking state after a valid headers message was processed:
 *  record block availability, possibly disconnect outbound peers serving
 *  low-work chains during IBD, and possibly protect this peer from the
 *  bad/lagging-chain eviction logic.
 *
 *  @param[in] last_header            Last header of the message (already in our index).
 *  @param[in] received_new_header    Whether the message contained a header new to us.
 *  @param[in] may_have_more_headers  Whether the message was full-sized, i.e. the
 *                                    peer may have further headers to give us.
 */
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
                                                        const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
{
    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());

    UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());

    // From here, pindexBestKnownBlock should be guaranteed to be non-null,
    // because it is set in UpdateBlockAvailability. Some nullptr checks
    // are still present, however, as belt-and-suspenders.

    // A new header with more work than our tip counts as a block announcement
    // for the stale-chain checks.
    if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
        nodestate->m_last_block_announcement = GetTime();
    }

    // If we're in IBD, we want outbound peers that will serve us a useful
    // chain. Disconnect peers that are on chains with insufficient work.
    if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) {
        // If the peer has no more headers to give us, then we know we have
        // their tip.
        if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
            // This peer has too little work on their headers chain to help
            // us sync -- disconnect if it is an outbound disconnection
            // candidate.
            // Note: We compare their tip to the minimum chain work (rather than
            // m_chainman.ActiveChain().Tip()) because we won't start block download
            // until we have a headers chain that has at least
            // the minimum chain work, even if a peer has a chain past our tip,
            // as an anti-DoS measure.
            if (pfrom.IsOutboundOrBlockRelayConn()) {
                LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
                pfrom.fDisconnect = true;
            }
        }
    }

    // If this is an outbound full-relay peer, check to see if we should protect
    // it from the bad/lagging chain logic.
    // Note that outbound block-relay peers are excluded from this protection, and
    // thus always subject to eviction under the bad/lagging chain logic.
    // See ChainSyncTimeoutState.
    if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
        if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
            LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
            nodestate->m_chain_sync.m_protect = true;
            ++m_outbound_peers_with_protect_from_disconnect;
        }
    }
}
3058
3059void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
3060 std::vector<CBlockHeader>&& headers,
3061 bool via_compact_block)
3062{
3063 size_t nCount = headers.size();
3064
3065 if (nCount == 0) {
3066 // Nothing interesting. Stop asking this peers for more headers.
3067 // If we were in the middle of headers sync, receiving an empty headers
3068 // message suggests that the peer suddenly has nothing to give us
3069 // (perhaps it reorged to our chain). Clear download state for this peer.
3070 LOCK(peer.m_headers_sync_mutex);
3071 if (peer.m_headers_sync) {
3072 peer.m_headers_sync.reset(nullptr);
3073 LOCK(m_headers_presync_mutex);
3074 m_headers_presync_stats.erase(pfrom.GetId());
3075 }
3076 // A headers message with no headers cannot be an announcement, so assume
3077 // it is a response to our last getheaders request, if there is one.
3078 peer.m_last_getheaders_timestamp = {};
3079 return;
3080 }
3081
3082 // Before we do any processing, make sure these pass basic sanity checks.
3083 // We'll rely on headers having valid proof-of-work further down, as an
3084 // anti-DoS criteria (note: this check is required before passing any
3085 // headers into HeadersSyncState).
3086 if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
3087 // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
3088 // just return. (Note that even if a header is announced via compact
3089 // block, the header itself should be valid, so this type of error can
3090 // always be punished.)
3091 return;
3092 }
3093
3094 const CBlockIndex *pindexLast = nullptr;
3095
3096 // We'll set already_validated_work to true if these headers are
3097 // successfully processed as part of a low-work headers sync in progress
3098 // (either in PRESYNC or REDOWNLOAD phase).
3099 // If true, this will mean that any headers returned to us (ie during
3100 // REDOWNLOAD) can be validated without further anti-DoS checks.
3101 bool already_validated_work = false;
3102
3103 // If we're in the middle of headers sync, let it do its magic.
3104 bool have_headers_sync = false;
3105 {
3106 LOCK(peer.m_headers_sync_mutex);
3107
3108 already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
3109
3110 // The headers we passed in may have been:
3111 // - untouched, perhaps if no headers-sync was in progress, or some
3112 // failure occurred
3113 // - erased, such as if the headers were successfully processed and no
3114 // additional headers processing needs to take place (such as if we
3115 // are still in PRESYNC)
3116 // - replaced with headers that are now ready for validation, such as
3117 // during the REDOWNLOAD phase of a low-work headers sync.
3118 // So just check whether we still have headers that we need to process,
3119 // or not.
3120 if (headers.empty()) {
3121 return;
3122 }
3123
3124 have_headers_sync = !!peer.m_headers_sync;
3125 }
3126
3127 // Do these headers connect to something in our block index?
3128 const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))};
3129 bool headers_connect_blockindex{chain_start_header != nullptr};
3130
3131 if (!headers_connect_blockindex) {
3132 // This could be a BIP 130 block announcement, use
3133 // special logic for handling headers that don't connect, as this
3134 // could be benign.
3135 HandleUnconnectingHeaders(pfrom, peer, headers);
3136 return;
3137 }
3138
3139 // If headers connect, assume that this is in response to any outstanding getheaders
3140 // request we may have sent, and clear out the time of our last request. Non-connecting
3141 // headers cannot be a response to a getheaders request.
3142 peer.m_last_getheaders_timestamp = {};
3143
3144 // If the headers we received are already in memory and an ancestor of
3145 // m_best_header or our tip, skip anti-DoS checks. These headers will not
3146 // use any more memory (and we are not leaking information that could be
3147 // used to fingerprint us).
3148 const CBlockIndex *last_received_header{nullptr};
3149 {
3150 LOCK(cs_main);
3151 last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
3152 if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
3153 already_validated_work = true;
3154 }
3155 }
3156
3157 // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
3158 // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
3159 // on startup).
3161 already_validated_work = true;
3162 }
3163
3164 // At this point, the headers connect to something in our block index.
3165 // Do anti-DoS checks to determine if we should process or store for later
3166 // processing.
3167 if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
3168 chain_start_header, headers)) {
3169 // If we successfully started a low-work headers sync, then there
3170 // should be no headers to process any further.
3171 Assume(headers.empty());
3172 return;
3173 }
3174
3175 // At this point, we have a set of headers with sufficient work on them
3176 // which can be processed.
3177
3178 // If we don't have the last header, then this peer will have given us
3179 // something new (if these headers are valid).
3180 bool received_new_header{last_received_header == nullptr};
3181
3182 // Now process all the headers.
3184 if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true, state, &pindexLast)) {
3185 if (state.IsInvalid()) {
3186 MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
3187 return;
3188 }
3189 }
3190 assert(pindexLast);
3191
3192 // Consider fetching more headers if we are not using our headers-sync mechanism.
3193 if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
3194 // Headers message had its maximum size; the peer may have more headers.
3195 if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
3196 LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
3197 pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
3198 }
3199 }
3200
3201 UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
3202
3203 // Consider immediately downloading blocks.
3204 HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
3205
3206 return;
3207}
3208
3209void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state,
3210 bool maybe_add_extra_compact_tx)
3211{
3212 AssertLockNotHeld(m_peer_mutex);
3213 AssertLockHeld(g_msgproc_mutex);
3214 AssertLockHeld(m_tx_download_mutex);
3215
3216 LogDebug(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
3217 ptx->GetHash().ToString(),
3218 ptx->GetWitnessHash().ToString(),
3219 nodeid,
3220 state.ToString());
3221
3223 return;
3224 } else if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
3225 // We can add the wtxid of this transaction to our reject filter.
3226 // Do not add txids of witness transactions or witness-stripped
3227 // transactions to the filter, as they can have been malleated;
3228 // adding such txids to the reject filter would potentially
3229 // interfere with relay of valid transactions from peers that
3230 // do not support wtxid-based relay. See
3231 // https://github.com/bitcoin/bitcoin/issues/8279 for details.
3232 // We can remove this restriction (and always add wtxids to
3233 // the filter even for witness stripped transactions) once
3234 // wtxid-based relay is broadly deployed.
3235 // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
3236 // for concerns around weakening security of unupgraded nodes
3237 // if we start doing this too early.
3239 // If the result is TX_RECONSIDERABLE, add it to m_lazy_recent_rejects_reconsiderable
3240 // because we should not download or submit this transaction by itself again, but may
3241 // submit it as part of a package later.
3242 RecentRejectsReconsiderableFilter().insert(ptx->GetWitnessHash().ToUint256());
3243 } else {
3244 RecentRejectsFilter().insert(ptx->GetWitnessHash().ToUint256());
3245 }
3246 m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
3247 // If the transaction failed for TX_INPUTS_NOT_STANDARD,
3248 // then we know that the witness was irrelevant to the policy
3249 // failure, since this check depends only on the txid
3250 // (the scriptPubKey being spent is covered by the txid).
3251 // Add the txid to the reject filter to prevent repeated
3252 // processing of this transaction in the event that child
3253 // transactions are later received (resulting in
3254 // parent-fetching by txid via the orphan-handling logic).
3255 // We only add the txid if it differs from the wtxid, to avoid wasting entries in the
3256 // rolling bloom filter.
3257 if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && ptx->HasWitness()) {
3258 RecentRejectsFilter().insert(ptx->GetHash().ToUint256());
3259 m_txrequest.ForgetTxHash(ptx->GetHash());
3260 }
3261 if (maybe_add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) {
3262 AddToCompactExtraTransactions(ptx);
3263 }
3264 }
3265
3266 MaybePunishNodeForTx(nodeid, state);
3267
3268 // If the tx failed in ProcessOrphanTx, it should be removed from the orphanage unless the
3269 // tx was still missing inputs. If the tx was not in the orphanage, EraseTx does nothing and returns 0.
3270 if (Assume(state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) && m_orphanage.EraseTx(ptx->GetWitnessHash()) > 0) {
3271 LogDebug(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString());
3272 }
3273}
3274
3275void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions)
3276{
3277 AssertLockNotHeld(m_peer_mutex);
3278 AssertLockHeld(g_msgproc_mutex);
3279 AssertLockHeld(m_tx_download_mutex);
3280
3281 // As this version of the transaction was acceptable, we can forget about any requests for it.
3282 // No-op if the tx is not in txrequest.
3283 m_txrequest.ForgetTxHash(tx->GetHash());
3284 m_txrequest.ForgetTxHash(tx->GetWitnessHash());
3285
3286 m_orphanage.AddChildrenToWorkSet(*tx);
3287 // If it came from the orphanage, remove it. No-op if the tx is not in txorphanage.
3288 m_orphanage.EraseTx(tx->GetWitnessHash());
3289
3290 LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
3291 nodeid,
3292 tx->GetHash().ToString(),
3293 tx->GetWitnessHash().ToString(),
3294 m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
3295
3296 RelayTransaction(tx->GetHash(), tx->GetWitnessHash());
3297
3298 for (const CTransactionRef& removedTx : replaced_transactions) {
3299 AddToCompactExtraTransactions(removedTx);
3300 }
3301}
3302
3303void PeerManagerImpl::ProcessPackageResult(const PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result)
3304{
3305 AssertLockNotHeld(m_peer_mutex);
3306 AssertLockHeld(g_msgproc_mutex);
3307 AssertLockHeld(m_tx_download_mutex);
3308
3309 const auto& package = package_to_validate.m_txns;
3310 const auto& senders = package_to_validate.m_senders;
3311
3312 if (package_result.m_state.IsInvalid()) {
3313 RecentRejectsReconsiderableFilter().insert(GetPackageHash(package));
3314 }
3315 // We currently only expect to process 1-parent-1-child packages. Remove if this changes.
3316 if (!Assume(package.size() == 2)) return;
3317
3318 // Iterate backwards to erase in-package descendants from the orphanage before they become
3319 // relevant in AddChildrenToWorkSet.
3320 auto package_iter = package.rbegin();
3321 auto senders_iter = senders.rbegin();
3322 while (package_iter != package.rend()) {
3323 const auto& tx = *package_iter;
3324 const NodeId nodeid = *senders_iter;
3325 const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())};
3326
3327 // It is not guaranteed that a result exists for every transaction.
3328 if (it_result != package_result.m_tx_results.end()) {
3329 const auto& tx_result = it_result->second;
3330 switch (tx_result.m_result_type) {
3332 {
3333 ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions);
3334 break;
3335 }
3338 {
3339 // Don't add to vExtraTxnForCompact, as these transactions should have already been
3340 // added there when added to the orphanage or rejected for TX_RECONSIDERABLE.
3341 // This should be updated if package submission is ever used for transactions
3342 // that haven't already been validated before.
3343 ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*maybe_add_extra_compact_tx=*/false);
3344 break;
3345 }
3347 {
3348 // AlreadyHaveTx() should be catching transactions that are already in mempool.
3349 Assume(false);
3350 break;
3351 }
3352 }
3353 }
3354 package_iter++;
3355 senders_iter++;
3356 }
3357}
3358
3359std::optional<PeerManagerImpl::PackageToValidate> PeerManagerImpl::Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid)
3360{
3361 AssertLockNotHeld(m_peer_mutex);
3362 AssertLockHeld(g_msgproc_mutex);
3363 AssertLockHeld(m_tx_download_mutex);
3364
3365 const auto& parent_wtxid{ptx->GetWitnessHash()};
3366
3367 Assume(RecentRejectsReconsiderableFilter().contains(parent_wtxid.ToUint256()));
3368
3369 // Prefer children from this peer. This helps prevent censorship attempts in which an attacker
3370 // sends lots of fake children for the parent, and we (unluckily) keep selecting the fake
3371 // children instead of the real one provided by the honest peer.
3372 const auto cpfp_candidates_same_peer{m_orphanage.GetChildrenFromSamePeer(ptx, nodeid)};
3373
3374 // These children should be sorted from newest to oldest. In the (probably uncommon) case
3375 // of children that replace each other, this helps us accept the highest feerate (probably the
3376 // most recent) one efficiently.
3377 for (const auto& child : cpfp_candidates_same_peer) {
3378 Package maybe_cpfp_package{ptx, child};
3379 if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package))) {
3380 return PeerManagerImpl::PackageToValidate{ptx, child, nodeid, nodeid};
3381 }
3382 }
3383
3384 // If no suitable candidate from the same peer is found, also try children that were provided by
3385 // a different peer. This is useful because sometimes multiple peers announce both transactions
3386 // to us, and we happen to download them from different peers (we wouldn't have known that these
3387 // 2 transactions are related). We still want to find 1p1c packages then.
3388 //
3389 // If we start tracking all announcers of orphans, we can restrict this logic to parent + child
3390 // pairs in which both were provided by the same peer, i.e. delete this step.
3391 const auto cpfp_candidates_different_peer{m_orphanage.GetChildrenFromDifferentPeer(ptx, nodeid)};
3392
3393 // Find the first 1p1c that hasn't already been rejected. We randomize the order to not
3394 // create a bias that attackers can use to delay package acceptance.
3395 //
3396 // Create a random permutation of the indices.
3397 std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size());
3398 std::iota(tx_indices.begin(), tx_indices.end(), 0);
3399 std::shuffle(tx_indices.begin(), tx_indices.end(), m_rng);
3400
3401 for (const auto index : tx_indices) {
3402 // If we already tried a package and failed for any reason, the combined hash was
3403 // cached in m_lazy_recent_rejects_reconsiderable.
3404 const auto [child_tx, child_sender] = cpfp_candidates_different_peer.at(index);
3405 Package maybe_cpfp_package{ptx, child_tx};
3406 if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package))) {
3407 return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid, child_sender};
3408 }
3409 }
3410 return std::nullopt;
3411}
3412
3413bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
3414{
3415 AssertLockHeld(g_msgproc_mutex);
3416 LOCK2(::cs_main, m_tx_download_mutex);
3417
3418 CTransactionRef porphanTx = nullptr;
3419
3420 while (CTransactionRef porphanTx = m_orphanage.GetTxToReconsider(peer.m_id)) {
3421 const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx);
3422 const TxValidationState& state = result.m_state;
3423 const Txid& orphanHash = porphanTx->GetHash();
3424 const Wtxid& orphan_wtxid = porphanTx->GetWitnessHash();
3425
3427 LogPrint(BCLog::TXPACKAGES, " accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
3428 ProcessValidTx(peer.m_id, porphanTx, result.m_replaced_transactions);
3429 return true;
3430 } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
3431 LogPrint(BCLog::TXPACKAGES, " invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n",
3432 orphanHash.ToString(),
3433 orphan_wtxid.ToString(),
3434 peer.m_id,
3435 state.ToString());
3436
3437 if (Assume(state.IsInvalid() &&
3441 ProcessInvalidTx(peer.m_id, porphanTx, state, /*maybe_add_extra_compact_tx=*/false);
3442 }
3443 return true;
3444 }
3445 }
3446
3447 return false;
3448}
3449
3450bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
3451 BlockFilterType filter_type, uint32_t start_height,
3452 const uint256& stop_hash, uint32_t max_height_diff,
3453 const CBlockIndex*& stop_index,
3454 BlockFilterIndex*& filter_index)
3455{
3456 const bool supported_filter_type =
3457 (filter_type == BlockFilterType::BASIC &&
3458 (peer.m_our_services & NODE_COMPACT_FILTERS));
3459 if (!supported_filter_type) {
3460 LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
3461 node.GetId(), static_cast<uint8_t>(filter_type));
3462 node.fDisconnect = true;
3463 return false;
3464 }
3465
3466 {
3467 LOCK(cs_main);
3468 stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
3469
3470 // Check that the stop block exists and the peer would be allowed to fetch it.
3471 if (!stop_index || !BlockRequestAllowed(stop_index)) {
3472 LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
3473 node.GetId(), stop_hash.ToString());
3474 node.fDisconnect = true;
3475 return false;
3476 }
3477 }
3478
3479 uint32_t stop_height = stop_index->nHeight;
3480 if (start_height > stop_height) {
3481 LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with "
3482 "start height %d and stop height %d\n",
3483 node.GetId(), start_height, stop_height);
3484 node.fDisconnect = true;
3485 return false;
3486 }
3487 if (stop_height - start_height >= max_height_diff) {
3488 LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
3489 node.GetId(), stop_height - start_height + 1, max_height_diff);
3490 node.fDisconnect = true;
3491 return false;
3492 }
3493
3494 filter_index = GetBlockFilterIndex(filter_type);
3495 if (!filter_index) {
3496 LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
3497 return false;
3498 }
3499
3500 return true;
3501}
3502
3503void PeerManagerImpl::ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv)
3504{
3505 uint8_t filter_type_ser;
3506 uint32_t start_height;
3507 uint256 stop_hash;
3508
3509 vRecv >> filter_type_ser >> start_height >> stop_hash;
3510
3511 const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3512
3513 const CBlockIndex* stop_index;
3514 BlockFilterIndex* filter_index;
3515 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3516 MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
3517 return;
3518 }
3519
3520 std::vector<BlockFilter> filters;
3521 if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
3522 LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3523 BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3524 return;
3525 }
3526
3527 for (const auto& filter : filters) {
3528 MakeAndPushMessage(node, NetMsgType::CFILTER, filter);
3529 }
3530}
3531
3532void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv)
3533{
3534 uint8_t filter_type_ser;
3535 uint32_t start_height;
3536 uint256 stop_hash;
3537
3538 vRecv >> filter_type_ser >> start_height >> stop_hash;
3539
3540 const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3541
3542 const CBlockIndex* stop_index;
3543 BlockFilterIndex* filter_index;
3544 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3545 MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
3546 return;
3547 }
3548
3549 uint256 prev_header;
3550 if (start_height > 0) {
3551 const CBlockIndex* const prev_block =
3552 stop_index->GetAncestor(static_cast<int>(start_height - 1));
3553 if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
3554 LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3555 BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
3556 return;
3557 }
3558 }
3559
3560 std::vector<uint256> filter_hashes;
3561 if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
3562 LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3563 BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3564 return;
3565 }
3566
3567 MakeAndPushMessage(node, NetMsgType::CFHEADERS,
3568 filter_type_ser,
3569 stop_index->GetBlockHash(),
3570 prev_header,
3571 filter_hashes);
3572}
3573
3574void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv)
3575{
3576 uint8_t filter_type_ser;
3577 uint256 stop_hash;
3578
3579 vRecv >> filter_type_ser >> stop_hash;
3580
3581 const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3582
3583 const CBlockIndex* stop_index;
3584 BlockFilterIndex* filter_index;
3585 if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash,
3586 /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
3587 stop_index, filter_index)) {
3588 return;
3589 }
3590
3591 std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
3592
3593 // Populate headers.
3594 const CBlockIndex* block_index = stop_index;
3595 for (int i = headers.size() - 1; i >= 0; i--) {
3596 int height = (i + 1) * CFCHECKPT_INTERVAL;
3597 block_index = block_index->GetAncestor(height);
3598
3599 if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
3600 LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3601 BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
3602 return;
3603 }
3604 }
3605
3606 MakeAndPushMessage(node, NetMsgType::CFCHECKPT,
3607 filter_type_ser,
3608 stop_index->GetBlockHash(),
3609 headers);
3610}
3611
3612void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked)
3613{
3614 bool new_block{false};
3615 m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block);
3616 if (new_block) {
3617 node.m_last_block_time = GetTime<std::chrono::seconds>();
3618 // In case this block came from a different peer than we requested
3619 // from, we can erase the block request now anyway (as we just stored
3620 // this block to disk).
3621 LOCK(cs_main);
3622 RemoveBlockRequest(block->GetHash(), std::nullopt);
3623 } else {
3624 LOCK(cs_main);
3625 mapBlockSource.erase(block->GetHash());
3626 }
3627}
3628
/** Handle a BLOCKTXN message (BIP 152): attempt to reconstruct a block from a
 *  previously received compact block plus the transactions the peer has just
 *  supplied, then submit the reconstructed block for validation.
 *
 *  @param pfrom              Peer that sent the BLOCKTXN message.
 *  @param peer               Peer state used for misbehavior accounting.
 *  @param block_transactions Block hash plus the requested transactions.
 */
void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
{
    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
    bool fBlockRead{false};
    {
        LOCK(cs_main);

        // All outstanding download requests for this block hash, across peers.
        auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash);
        size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
        bool requested_block_from_this_peer{false};

        // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
        bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());

        // Only proceed if this peer has an in-flight request from us with an
        // associated PartiallyDownloadedBlock to fill in.
        while (range_flight.first != range_flight.second) {
            auto [node_id, block_it] = range_flight.first->second;
            if (node_id == pfrom.GetId() && block_it->partialBlock) {
                requested_block_from_this_peer = true;
                break;
            }
            range_flight.first++;
        }

        if (!requested_block_from_this_peer) {
            LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
            return;
        }

        // range_flight.first still points at this peer's entry after the loop above.
        PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock;
        ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn);
        if (status == READ_STATUS_INVALID) {
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
            Misbehaving(peer, "invalid compact block/non-matching block transactions");
            return;
        } else if (status == READ_STATUS_FAILED) {
            // Reconstruction failed (e.g. short-ID collision); not the peer's fault.
            if (first_in_flight) {
                // Might have collided, fall back to getdata now :(
                std::vector<CInv> invs;
                invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash);
                MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs);
            } else {
                RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId());
                LogPrint(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId());
                return;
            }
        } else {
            // Block is either okay, or possibly we received
            // READ_STATUS_CHECKBLOCK_FAILED.
            // Note that CheckBlock can only fail for one of a few reasons:
            // 1. bad-proof-of-work (impossible here, because we've already
            //    accepted the header)
            // 2. merkleroot doesn't match the transactions given (already
            //    caught in FillBlock with READ_STATUS_FAILED, so
            //    impossible here)
            // 3. the block is otherwise invalid (eg invalid coinbase,
            //    block is too big, too many legacy sigops, etc).
            // So if CheckBlock failed, #3 is the only possibility.
            // Under BIP 152, we don't discourage the peer unless proof of work is
            // invalid (we don't require all the stateless checks to have
            // been run). This is handled below, so just treat this as
            // though the block was successfully read, and rely on the
            // handling in ProcessNewBlock to ensure the block index is
            // updated, etc.
            RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer
            fBlockRead = true;
            // mapBlockSource is used for potentially punishing peers and
            // updating which peers send us compact blocks, so the race
            // between here and cs_main in ProcessNewBlock is fine.
            // BIP 152 permits peers to relay compact blocks after validating
            // the header only; we should not punish peers if the block turns
            // out to be invalid.
            mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false));
        }
    } // Don't hold cs_main when we call into ProcessNewBlock
    if (fBlockRead) {
        // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
        // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
        // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
        // disk-space attacks), but this should be safe due to the
        // protections in the compact block handler -- see related comment
        // in compact block optimistic reconstruction handling.
        ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
    }
    return;
}
3714
3715void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
3716 const std::chrono::microseconds time_received,
3717 const std::atomic<bool>& interruptMsgProc)
3718{
3719 AssertLockHeld(g_msgproc_mutex);
3720
3721 LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
3722
3723 PeerRef peer = GetPeerRef(pfrom.GetId());
3724 if (peer == nullptr) return;
3725
3726 if (msg_type == NetMsgType::VERSION) {
3727 if (pfrom.nVersion != 0) {
3728 LogPrint(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId());
3729 return;
3730 }
3731
3732 int64_t nTime;
3733 CService addrMe;
3734 uint64_t nNonce = 1;
3735 ServiceFlags nServices;
3736 int nVersion;
3737 std::string cleanSubVer;
3738 int starting_height = -1;
3739 bool fRelay = true;
3740
3741 vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
3742 if (nTime < 0) {
3743 nTime = 0;
3744 }
3745 vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer
3746 vRecv >> CNetAddr::V1(addrMe);
3747 if (!pfrom.IsInboundConn())
3748 {
3749 // Overwrites potentially existing services. In contrast to this,
3750 // unvalidated services received via gossip relay in ADDR/ADDRV2
3751 // messages are only ever added but cannot replace existing ones.
3752 m_addrman.SetServices(pfrom.addr, nServices);
3753 }
3754 if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
3755 {
3756 LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
3757 pfrom.fDisconnect = true;
3758 return;
3759 }
3760
3761 if (nVersion < MIN_PEER_PROTO_VERSION) {
3762 // disconnect from peers older than this proto version
3763 LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion);
3764 pfrom.fDisconnect = true;
3765 return;
3766 }
3767
3768 if (!vRecv.empty()) {
3769 // The version message includes information about the sending node which we don't use:
3770 // - 8 bytes (service bits)
3771 // - 16 bytes (ipv6 address)
3772 // - 2 bytes (port)
3773 vRecv.ignore(26);
3774 vRecv >> nNonce;
3775 }
3776 if (!vRecv.empty()) {
3777 std::string strSubVer;
3778 vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
3779 cleanSubVer = SanitizeString(strSubVer);
3780 }
3781 if (!vRecv.empty()) {
3782 vRecv >> starting_height;
3783 }
3784 if (!vRecv.empty())
3785 vRecv >> fRelay;
3786 // Disconnect if we connected to ourself
3787 if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
3788 {
3789 LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort());
3790 pfrom.fDisconnect = true;
3791 return;
3792 }
3793
3794 if (pfrom.IsInboundConn() && addrMe.IsRoutable())
3795 {
3796 SeenLocal(addrMe);
3797 }
3798
3799 // Inbound peers send us their version message when they connect.
3800 // We send our version message in response.
3801 if (pfrom.IsInboundConn()) {
3802 PushNodeVersion(pfrom, *peer);
3803 }
3804
3805 // Change version
3806 const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
3807 pfrom.SetCommonVersion(greatest_common_version);
3808 pfrom.nVersion = nVersion;
3809
3810 if (greatest_common_version >= WTXID_RELAY_VERSION) {
3811 MakeAndPushMessage(pfrom, NetMsgType::WTXIDRELAY);
3812 }
3813
3814 // Signal ADDRv2 support (BIP155).
3815 if (greatest_common_version >= 70016) {
3816 // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
3817 // implementations reject messages they don't know. As a courtesy, don't send
3818 // it to nodes with a version before 70016, as no software is known to support
3819 // BIP155 that doesn't announce at least that protocol version number.
3820 MakeAndPushMessage(pfrom, NetMsgType::SENDADDRV2);
3821 }
3822
3823 pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices);
3824 peer->m_their_services = nServices;
3825 pfrom.SetAddrLocal(addrMe);
3826 {
3827 LOCK(pfrom.m_subver_mutex);
3828 pfrom.cleanSubVer = cleanSubVer;
3829 }
3830 peer->m_starting_height = starting_height;
3831
3832 // Only initialize the Peer::TxRelay m_relay_txs data structure if:
3833 // - this isn't an outbound block-relay-only connection, and
3834 // - this isn't an outbound feeler connection, and
3835 // - fRelay=true (the peer wishes to receive transaction announcements)
3836 // or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that
3837 // the peer may turn on transaction relay later.
3838 if (!pfrom.IsBlockOnlyConn() &&
3839 !pfrom.IsFeelerConn() &&
3840 (fRelay || (peer->m_our_services & NODE_BLOOM))) {
3841 auto* const tx_relay = peer->SetTxRelay();
3842 {
3843 LOCK(tx_relay->m_bloom_filter_mutex);
3844 tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message
3845 }
3846 if (fRelay) pfrom.m_relays_txs = true;
3847 }
3848
3849 if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) {
3850 // Per BIP-330, we announce txreconciliation support if:
3851 // - protocol version per the peer's VERSION message supports WTXID_RELAY;
3852 // - transaction relay is supported per the peer's VERSION message
3853 // - this is not a block-relay-only connection and not a feeler
3854 // - this is not an addr fetch connection;
3855 // - we are not in -blocksonly mode.
3856 const auto* tx_relay = peer->GetTxRelay();
3857 if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) &&
3858 !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) {
3859 const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId());
3860 MakeAndPushMessage(pfrom, NetMsgType::SENDTXRCNCL,
3861 TXRECONCILIATION_VERSION, recon_salt);
3862 }
3863 }
3864
3865 MakeAndPushMessage(pfrom, NetMsgType::VERACK);
3866
3867 // Potentially mark this peer as a preferred download peer.
3868 {
3869 LOCK(cs_main);
3870 CNodeState* state = State(pfrom.GetId());
3871 state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
3872 m_num_preferred_download_peers += state->fPreferredDownload;
3873 }
3874
3875 // Attempt to initialize address relay for outbound peers and use result
3876 // to decide whether to send GETADDR, so that we don't send it to
3877 // inbound or outbound block-relay-only peers.
3878 bool send_getaddr{false};
3879 if (!pfrom.IsInboundConn()) {
3880 send_getaddr = SetupAddressRelay(pfrom, *peer);
3881 }
3882 if (send_getaddr) {
3883 // Do a one-time address fetch to help populate/update our addrman.
3884 // If we're starting up for the first time, our addrman may be pretty
3885 // empty, so this mechanism is important to help us connect to the network.
3886 // We skip this for block-relay-only peers. We want to avoid
3887 // potentially leaking addr information and we do not want to
3888 // indicate to the peer that we will participate in addr relay.
3889 MakeAndPushMessage(pfrom, NetMsgType::GETADDR);
3890 peer->m_getaddr_sent = true;
3891 // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response
3892 // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
3893 peer->m_addr_token_bucket += MAX_ADDR_TO_SEND;
3894 }
3895
3896 if (!pfrom.IsInboundConn()) {
3897 // For non-inbound connections, we update the addrman to record
3898 // connection success so that addrman will have an up-to-date
3899 // notion of which peers are online and available.
3900 //
3901 // While we strive to not leak information about block-relay-only
3902 // connections via the addrman, not moving an address to the tried
3903 // table is also potentially detrimental because new-table entries
3904 // are subject to eviction in the event of addrman collisions. We
3905 // mitigate the information-leak by never calling
3906 // AddrMan::Connected() on block-relay-only peers; see
3907 // FinalizeNode().
3908 //
3909 // This moves an address from New to Tried table in Addrman,
3910 // resolves tried-table collisions, etc.
3911 m_addrman.Good(pfrom.addr);
3912 }
3913
3914 std::string remoteAddr;
3915 if (fLogIPs)
3916 remoteAddr = ", peeraddr=" + pfrom.addr.ToStringAddrPort();
3917
3918 const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3919 LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n",
3920 cleanSubVer, pfrom.nVersion,
3921 peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(),
3922 remoteAddr, (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3923
3924 peer->m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now<NodeSeconds>();
3925 if (!pfrom.IsInboundConn()) {
3926 // Don't use timedata samples from inbound peers to make it
3927 // harder for others to create false warnings about our clock being out of sync.
3928 m_outbound_time_offsets.Add(peer->m_time_offset);
3929 m_outbound_time_offsets.WarnIfOutOfSync();
3930 }
3931
3932 // If the peer is old enough to have the old alert system, send it the final alert.
3933 if (greatest_common_version <= 70012) {
3934 const auto finalAlert{ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50")};
3935 MakeAndPushMessage(pfrom, "alert", Span{finalAlert});
3936 }
3937
3938 // Feeler connections exist only to verify if address is online.
3939 if (pfrom.IsFeelerConn()) {
3940 LogPrint(BCLog::NET, "feeler connection completed peer=%d; disconnecting\n", pfrom.GetId());
3941 pfrom.fDisconnect = true;
3942 }
3943 return;
3944 }
3945
3946 if (pfrom.nVersion == 0) {
3947 // Must have a version message before anything else
3948 LogPrint(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3949 return;
3950 }
3951
3952 if (msg_type == NetMsgType::VERACK) {
3953 if (pfrom.fSuccessfullyConnected) {
3954 LogPrint(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId());
3955 return;
3956 }
3957
3958 // Log successful connections unconditionally for outbound, but not for inbound as those
3959 // can be triggered by an attacker at high rate.
3961 const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3962 LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n",
3963 pfrom.ConnectionTypeAsString(),
3964 TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type),
3965 pfrom.nVersion.load(), peer->m_starting_height,
3966 pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : ""),
3967 (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3968 }
3969
3971 // Tell our peer we are willing to provide version 2 cmpctblocks.
3972 // However, we do not request new block announcements using
3973 // cmpctblock messages.
3974 // We send this to non-NODE NETWORK peers as well, because
3975 // they may wish to request compact blocks from us
3976 MakeAndPushMessage(pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION);
3977 }
3978
3979 if (m_txreconciliation) {
3980 if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) {
3981 // We could have optimistically pre-registered/registered the peer. In that case,
3982 // we should forget about the reconciliation state here if this wasn't followed
3983 // by WTXIDRELAY (since WTXIDRELAY can't be announced later).
3984 m_txreconciliation->ForgetPeer(pfrom.GetId());
3985 }
3986 }
3987
3988 if (auto tx_relay = peer->GetTxRelay()) {
3989 // `TxRelay::m_tx_inventory_to_send` must be empty before the
3990 // version handshake is completed as
3991 // `TxRelay::m_next_inv_send_time` is first initialised in
3992 // `SendMessages` after the verack is received. Any transactions
3993 // received during the version handshake would otherwise
3994 // immediately be advertised without random delay, potentially
3995 // leaking the time of arrival to a spy.
3997 tx_relay->m_tx_inventory_mutex,
3998 return tx_relay->m_tx_inventory_to_send.empty() &&
3999 tx_relay->m_next_inv_send_time == 0s));
4000 }
4001
4002 pfrom.fSuccessfullyConnected = true;
4003 return;
4004 }
4005
4006 if (msg_type == NetMsgType::SENDHEADERS) {
4007 peer->m_prefers_headers = true;
4008 return;
4009 }
4010
4011 if (msg_type == NetMsgType::SENDCMPCT) {
4012 bool sendcmpct_hb{false};
4013 uint64_t sendcmpct_version{0};
4014 vRecv >> sendcmpct_hb >> sendcmpct_version;
4015
4016 // Only support compact block relay with witnesses
4017 if (sendcmpct_version != CMPCTBLOCKS_VERSION) return;
4018
4019 LOCK(cs_main);
4020 CNodeState* nodestate = State(pfrom.GetId());
4021 nodestate->m_provides_cmpctblocks = true;
4022 nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
4023 // save whether peer selects us as BIP152 high-bandwidth peer
4024 // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth)
4025 pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
4026 return;
4027 }
4028
4029 // BIP339 defines feature negotiation of wtxidrelay, which must happen between
4030 // VERSION and VERACK to avoid relay problems from switching after a connection is up.
4031 if (msg_type == NetMsgType::WTXIDRELAY) {
4032 if (pfrom.fSuccessfullyConnected) {
4033 // Disconnect peers that send a wtxidrelay message after VERACK.
4034 LogPrint(BCLog::NET, "wtxidrelay received after verack from peer=%d; disconnecting\n", pfrom.GetId());
4035 pfrom.fDisconnect = true;
4036 return;
4037 }
4038 if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
4039 if (!peer->m_wtxid_relay) {
4040 peer->m_wtxid_relay = true;
4041 m_wtxid_relay_peers++;
4042 } else {
4043 LogPrint(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId());
4044 }
4045 } else {
4046 LogPrint(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId());
4047 }
4048 return;
4049 }
4050
4051 // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen
4052 // between VERSION and VERACK.
4053 if (msg_type == NetMsgType::SENDADDRV2) {
4054 if (pfrom.fSuccessfullyConnected) {
4055 // Disconnect peers that send a SENDADDRV2 message after VERACK.
4056 LogPrint(BCLog::NET, "sendaddrv2 received after verack from peer=%d; disconnecting\n", pfrom.GetId());
4057 pfrom.fDisconnect = true;
4058 return;
4059 }
4060 peer->m_wants_addrv2 = true;
4061 return;
4062 }
4063
4064 // Received from a peer demonstrating readiness to announce transactions via reconciliations.
4065 // This feature negotiation must happen between VERSION and VERACK to avoid relay problems
4066 // from switching announcement protocols after the connection is up.
4067 if (msg_type == NetMsgType::SENDTXRCNCL) {
4068 if (!m_txreconciliation) {
4069 LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId());
4070 return;
4071 }
4072
4073 if (pfrom.fSuccessfullyConnected) {
4074 LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received after verack from peer=%d; disconnecting\n", pfrom.GetId());
4075 pfrom.fDisconnect = true;
4076 return;
4077 }
4078
4079 // Peer must not offer us reconciliations if we specified no tx relay support in VERSION.
4080 if (RejectIncomingTxs(pfrom)) {
4081 LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d to which we indicated no tx relay; disconnecting\n", pfrom.GetId());
4082 pfrom.fDisconnect = true;
4083 return;
4084 }
4085
4086 // Peer must not offer us reconciliations if they specified no tx relay support in VERSION.
4087 // This flag might also be false in other cases, but the RejectIncomingTxs check above
4088 // eliminates them, so that this flag fully represents what we are looking for.
4089 const auto* tx_relay = peer->GetTxRelay();
4090 if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) {
4091 LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d which indicated no tx relay to us; disconnecting\n", pfrom.GetId());
4092 pfrom.fDisconnect = true;
4093 return;
4094 }
4095
4096 uint32_t peer_txreconcl_version;
4097 uint64_t remote_salt;
4098 vRecv >> peer_txreconcl_version >> remote_salt;
4099
4100 const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(),
4101 peer_txreconcl_version, remote_salt);
4102 switch (result) {
4104 LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId());
4105 break;
4107 break;
4109 LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d (sendtxrcncl received from already registered peer); disconnecting\n", pfrom.GetId());
4110 pfrom.fDisconnect = true;
4111 return;
4113 LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d; disconnecting\n", pfrom.GetId());
4114 pfrom.fDisconnect = true;
4115 return;
4116 }
4117 return;
4118 }
4119
4120 if (!pfrom.fSuccessfullyConnected) {
4121 LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
4122 return;
4123 }
4124
4125 if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
4126 const auto ser_params{
4127 msg_type == NetMsgType::ADDRV2 ?
4128 // Set V2 param so that the CNetAddr and CAddress
4129 // unserialize methods know that an address in v2 format is coming.
4132 };
4133
4134 std::vector<CAddress> vAddr;
4135
4136 vRecv >> ser_params(vAddr);
4137
4138 if (!SetupAddressRelay(pfrom, *peer)) {
4139 LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
4140 return;
4141 }
4142
4143 if (vAddr.size() > MAX_ADDR_TO_SEND)
4144 {
4145 Misbehaving(*peer, strprintf("%s message size = %u", msg_type, vAddr.size()));
4146 return;
4147 }
4148
4149 // Store the new addresses
4150 std::vector<CAddress> vAddrOk;
4151 const auto current_a_time{Now<NodeSeconds>()};
4152
4153 // Update/increment addr rate limiting bucket.
4154 const auto current_time{GetTime<std::chrono::microseconds>()};
4155 if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
4156 // Don't increment bucket if it's already full
4157 const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
4158 const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
4159 peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
4160 }
4161 peer->m_addr_token_timestamp = current_time;
4162
4163 const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr);
4164 uint64_t num_proc = 0;
4165 uint64_t num_rate_limit = 0;
4166 std::shuffle(vAddr.begin(), vAddr.end(), m_rng);
4167 for (CAddress& addr : vAddr)
4168 {
4169 if (interruptMsgProc)
4170 return;
4171
4172 // Apply rate limiting.
4173 if (peer->m_addr_token_bucket < 1.0) {
4174 if (rate_limited) {
4175 ++num_rate_limit;
4176 continue;
4177 }
4178 } else {
4179 peer->m_addr_token_bucket -= 1.0;
4180 }
4181 // We only bother storing full nodes, though this may include
4182 // things which we would not make an outbound connection to, in
4183 // part because we may make feeler connections to them.
4184 if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
4185 continue;
4186
4187 if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) {
4188 addr.nTime = current_a_time - 5 * 24h;
4189 }
4190 AddAddressKnown(*peer, addr);
4191 if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
4192 // Do not process banned/discouraged addresses beyond remembering we received them
4193 continue;
4194 }
4195 ++num_proc;
4196 const bool reachable{g_reachable_nets.Contains(addr)};
4197 if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
4198 // Relay to a limited number of other nodes
4199 RelayAddress(pfrom.GetId(), addr, reachable);
4200 }
4201 // Do not store addresses outside our network
4202 if (reachable) {
4203 vAddrOk.push_back(addr);
4204 }
4205 }
4206 peer->m_addr_processed += num_proc;
4207 peer->m_addr_rate_limited += num_rate_limit;
4208 LogPrint(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n",
4209 vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
4210
4211 m_addrman.Add(vAddrOk, pfrom.addr, 2h);
4212 if (vAddr.size() < 1000) peer->m_getaddr_sent = false;
4213
4214 // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements
4215 if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
4216 LogPrint(BCLog::NET, "addrfetch connection completed peer=%d; disconnecting\n", pfrom.GetId());
4217 pfrom.fDisconnect = true;
4218 }
4219 return;
4220 }
4221
4222 if (msg_type == NetMsgType::INV) {
4223 std::vector<CInv> vInv;
4224 vRecv >> vInv;
4225 if (vInv.size() > MAX_INV_SZ)
4226 {
4227 Misbehaving(*peer, strprintf("inv message size = %u", vInv.size()));
4228 return;
4229 }
4230
4231 const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
4232
4233 LOCK2(cs_main, m_tx_download_mutex);
4234
4235 const auto current_time{GetTime<std::chrono::microseconds>()};
4236 uint256* best_block{nullptr};
4237
4238 for (CInv& inv : vInv) {
4239 if (interruptMsgProc) return;
4240
4241 // Ignore INVs that don't match wtxidrelay setting.
4242 // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
4243 // This is fine as no INV messages are involved in that process.
4244 if (peer->m_wtxid_relay) {
4245 if (inv.IsMsgTx()) continue;
4246 } else {
4247 if (inv.IsMsgWtx()) continue;
4248 }
4249
4250 if (inv.IsMsgBlk()) {
4251 const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
4252 LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
4253
4254 UpdateBlockAvailability(pfrom.GetId(), inv.hash);
4255 if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) {
4256 // Headers-first is the primary method of announcement on
4257 // the network. If a node fell back to sending blocks by
4258 // inv, it may be for a re-org, or because we haven't
4259 // completed initial headers sync. The final block hash
4260 // provided should be the highest, so send a getheaders and
4261 // then fetch the blocks we need to catch up.
4262 best_block = &inv.hash;
4263 }
4264 } else if (inv.IsGenTxMsg()) {
4265 if (reject_tx_invs) {
4266 LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
4267 pfrom.fDisconnect = true;
4268 return;
4269 }
4270 const GenTxid gtxid = ToGenTxid(inv);
4271 const bool fAlreadyHave = AlreadyHaveTx(gtxid, /*include_reconsiderable=*/true);
4272 LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
4273
4274 AddKnownTx(*peer, inv.hash);
4275 if (!fAlreadyHave && !m_chainman.IsInitialBlockDownload()) {
4276 AddTxAnnouncement(pfrom, gtxid, current_time);
4277 }
4278 } else {
4279 LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
4280 }
4281 }
4282
4283 if (best_block != nullptr) {
4284 // If we haven't started initial headers-sync with this peer, then
4285 // consider sending a getheaders now. On initial startup, there's a
4286 // reliability vs bandwidth tradeoff, where we are only trying to do
4287 // initial headers sync with one peer at a time, with a long
4288 // timeout (at which point, if the sync hasn't completed, we will
4289 // disconnect the peer and then choose another). In the meantime,
4290 // as new blocks are found, we are willing to add one new peer per
4291 // block to sync with as well, to sync quicker in the case where
4292 // our initial peer is unresponsive (but less bandwidth than we'd
4293 // use if we turned on sync with all peers).
4294 CNodeState& state{*Assert(State(pfrom.GetId()))};
4295 if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) {
4296 if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
4297 LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
4298 m_chainman.m_best_header->nHeight, best_block->ToString(),
4299 pfrom.GetId());
4300 }
4301 if (!state.fSyncStarted) {
4302 peer->m_inv_triggered_getheaders_before_sync = true;
4303 // Update the last block hash that triggered a new headers
4304 // sync, so that we don't turn on headers sync with more
4305 // than 1 new peer every new block.
4306 m_last_block_inv_triggering_headers_sync = *best_block;
4307 }
4308 }
4309 }
4310
4311 return;
4312 }
4313
4314 if (msg_type == NetMsgType::GETDATA) {
4315 std::vector<CInv> vInv;
4316 vRecv >> vInv;
4317 if (vInv.size() > MAX_INV_SZ)
4318 {
4319 Misbehaving(*peer, strprintf("getdata message size = %u", vInv.size()));
4320 return;
4321 }
4322
4323 LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
4324
4325 if (vInv.size() > 0) {
4326 LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
4327 }
4328
4329 {
4330 LOCK(peer->m_getdata_requests_mutex);
4331 peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
4332 ProcessGetData(pfrom, *peer, interruptMsgProc);
4333 }
4334
4335 return;
4336 }
4337
4338 if (msg_type == NetMsgType::GETBLOCKS) {
4339 CBlockLocator locator;
4340 uint256 hashStop;
4341 vRecv >> locator >> hashStop;
4342
4343 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4344 LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
4345 pfrom.fDisconnect = true;
4346 return;
4347 }
4348
4349 // We might have announced the currently-being-connected tip using a
4350 // compact block, which resulted in the peer sending a getblocks
4351 // request, which we would otherwise respond to without the new block.
4352 // To avoid this situation we simply verify that we are on our best
4353 // known chain now. This is super overkill, but we handle it better
4354 // for getheaders requests, and there are no known nodes which support
4355 // compact blocks but still use getblocks to request blocks.
4356 {
4357 std::shared_ptr<const CBlock> a_recent_block;
4358 {
4359 LOCK(m_most_recent_block_mutex);
4360 a_recent_block = m_most_recent_block;
4361 }
4363 if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
4364 LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
4365 }
4366 }
4367
4368 LOCK(cs_main);
4369
4370 // Find the last block the caller has in the main chain
4371 const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4372
4373 // Send the rest of the chain
4374 if (pindex)
4375 pindex = m_chainman.ActiveChain().Next(pindex);
4376 int nLimit = 500;
4377 LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
4378 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
4379 {
4380 if (pindex->GetBlockHash() == hashStop)
4381 {
4382 LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4383 break;
4384 }
4385 // If pruning, don't inv blocks unless we have on disk and are likely to still have
4386 // for some reasonable time window (1 hour) that block relay might require.
4387 const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
4388 if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) {
4389 LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4390 break;
4391 }
4392 WITH_LOCK(peer->m_block_inv_mutex, peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
4393 if (--nLimit <= 0) {
4394 // When this block is requested, we'll send an inv that'll
4395 // trigger the peer to getblocks the next batch of inventory.
4396 LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4397 WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();});
4398 break;
4399 }
4400 }
4401 return;
4402 }
4403
4404 if (msg_type == NetMsgType::GETBLOCKTXN) {
// BIP 152: peer could not reconstruct a compact block from its mempool and
// is requesting the specific transactions it is missing.
// NOTE(review): the declaration of 'req' (original line 4405, presumably
// 'BlockTransactionsRequest req;') appears to have been dropped by this
// extraction — confirm against the upstream file.
4406 vRecv >> req;
4407
4408 std::shared_ptr<const CBlock> recent_block;
4409 {
4410 LOCK(m_most_recent_block_mutex);
4411 if (m_most_recent_block_hash == req.blockhash)
4412 recent_block = m_most_recent_block;
4413 // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
4414 }
4415 if (recent_block) {
// Fast path: answer from the cached most-recent block without taking
// cs_main or touching disk.
4416 SendBlockTransactions(pfrom, *peer, *recent_block, req);
4417 return;
4418 }
4419
4420 FlatFilePos block_pos{};
4421 {
4422 LOCK(cs_main);
4423
4424 const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
4425 if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
4426 LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
4427 return;
4428 }
4429
// Only serve blocktxn for sufficiently recent blocks; older requests fall
// through to the full-block response below.
4430 if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
4431 block_pos = pindex->GetBlockPos();
4432 }
4433 }
4434
4435 if (!block_pos.IsNull()) {
4436 CBlock block;
4437 const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, block_pos)};
4438 // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get
4439 // pruned after we release cs_main above, so this read should never fail.
4440 assert(ret);
4441
4442 SendBlockTransactions(pfrom, *peer, block, req);
4443 return;
4444 }
4445
4446 // If an older block is requested (should never happen in practice,
4447 // but can happen in tests) send a block response instead of a
4448 // blocktxn response. Sending a full block response instead of a
4449 // small blocktxn response is preferable in the case where a peer
4450 // might maliciously send lots of getblocktxn requests to trigger
4451 // expensive disk reads, because it will require the peer to
4452 // actually receive all the data read from disk over the network.
4453 LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
// NOTE(review): the declaration of 'inv' (original line 4454) appears to
// have been dropped by this extraction — confirm against upstream.
4455 WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
4456 // The message processing loop will go around again (without pausing) and we'll respond then
4457 return;
4458 }
4459
4460 if (msg_type == NetMsgType::GETHEADERS) {
// Serve up to MAX_HEADERS_RESULTS headers following the fork point implied
// by the peer's locator (or the single header identified by hashStop).
4461 CBlockLocator locator;
4462 uint256 hashStop;
4463 vRecv >> locator >> hashStop;
4464
// Oversized locators are a protocol violation; disconnect rather than scan.
4465 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4466 LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
4467 pfrom.fDisconnect = true;
4468 return;
4469 }
4470
4471 if (m_chainman.m_blockman.LoadingBlocks()) {
4472 LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId());
4473 return;
4474 }
4475
4476 LOCK(cs_main);
4477
4478 // Note that if we were to be on a chain that forks from the checkpointed
4479 // chain, then serving those headers to a peer that has seen the
4480 // checkpointed chain would cause that peer to disconnect us. Requiring
4481 // that our chainwork exceed the minimum chain work is a protection against
4482 // being fed a bogus chain when we started up for the first time and
4483 // getting partitioned off the honest network for serving that chain to
4484 // others.
4485 if (m_chainman.ActiveTip() == nullptr ||
4486 (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) {
4487 LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
4488 // Just respond with an empty headers message, to tell the peer to
4489 // go away but not treat us as unresponsive.
4490 MakeAndPushMessage(pfrom, NetMsgType::HEADERS, std::vector<CBlockHeader>());
4491 return;
4492 }
4493
4494 CNodeState *nodestate = State(pfrom.GetId());
4495 const CBlockIndex* pindex = nullptr;
4496 if (locator.IsNull())
4497 {
4498 // If locator is null, return the hashStop block
4499 pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
4500 if (!pindex) {
4501 return;
4502 }
4503
// Refuse to serve old off-main-chain headers (fingerprinting protection).
4504 if (!BlockRequestAllowed(pindex)) {
4505 LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
4506 return;
4507 }
4508 }
4509 else
4510 {
4511 // Find the last block the caller has in the main chain
4512 pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4513 if (pindex)
4514 pindex = m_chainman.ActiveChain().Next(pindex);
4515 }
4516
4517 // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
4518 std::vector<CBlock> vHeaders;
4519 int nLimit = MAX_HEADERS_RESULTS;
4520 LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
4521 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
4522 {
4523 vHeaders.emplace_back(pindex->GetBlockHeader());
4524 if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
4525 break;
4526 }
4527 // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR
4528 // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty
4529 // headers message). In both cases it's safe to update
4530 // pindexBestHeaderSent to be our tip.
4531 //
4532 // It is important that we simply reset the BestHeaderSent value here,
4533 // and not max(BestHeaderSent, newHeaderSent). We might have announced
4534 // the currently-being-connected tip using a compact block, which
4535 // resulted in the peer sending a headers request, which we respond to
4536 // without the new block. By resetting the BestHeaderSent, we ensure we
4537 // will re-announce the new block via headers (or compact blocks again)
4538 // in the SendMessages logic.
4539 nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip();
4540 MakeAndPushMessage(pfrom, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
4541 return;
4542 }
4543
4544 if (msg_type == NetMsgType::TX) {
// Handle an unsolicited/announced transaction: validate it against the
// mempool, possibly as a 1-parent-1-child package, or stash it in the
// orphanage if its parents are unknown.
4545 if (RejectIncomingTxs(pfrom)) {
4546 LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
4547 pfrom.fDisconnect = true;
4548 return;
4549 }
4550
4551 // Stop processing the transaction early if we are still in IBD since we don't
4552 // have enough information to validate it yet. Sending unsolicited transactions
4553 // is not considered a protocol violation, so don't punish the peer.
4554 if (m_chainman.IsInitialBlockDownload()) return;
4555
4556 CTransactionRef ptx;
4557 vRecv >> TX_WITH_WITNESS(ptx);
4558 const CTransaction& tx = *ptx;
4559
4560 const uint256& txid = ptx->GetHash();
4561 const uint256& wtxid = ptx->GetWitnessHash();
4562
// Track the tx under whichever id this peer relays by (wtxid vs txid).
4563 const uint256& hash = peer->m_wtxid_relay ? wtxid : txid;
4564 AddKnownTx(*peer, hash);
4565
4566 LOCK2(cs_main, m_tx_download_mutex);
4567
4568 m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
4569 if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid);
4570
4571 // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the
4572 // absence of witness malleation, this is strictly better, because the
4573 // recent rejects filter may contain the wtxid but rarely contains
4574 // the txid of a segwit transaction that has been rejected.
4575 // In the presence of witness malleation, it's possible that by only
4576 // doing the check with wtxid, we could overlook a transaction which
4577 // was confirmed with a different witness, or exists in our mempool
4578 // with a different witness, but this has limited downside:
4579 // mempool validation does its own lookup of whether we have the txid
4580 // already; and an adversary can already relay us old transactions
4581 // (older than our recency filter) if trying to DoS us, without any need
4582 // for witness malleation.
4583 if (AlreadyHaveTx(GenTxid::Wtxid(wtxid), /*include_reconsiderable=*/true)) {
// NOTE(review): the condition opening this branch (original line 4584,
// presumably a check for NetPermissionFlags::ForceRelay) appears to have
// been dropped by this extraction — confirm against upstream.
4585 // Always relay transactions received from peers with forcerelay
4586 // permission, even if they were already in the mempool, allowing
4587 // the node to function as a gateway for nodes hidden behind it.
4588 if (!m_mempool.exists(GenTxid::Txid(tx.GetHash()))) {
4589 LogPrintf("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n",
4590 tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
4591 } else {
4592 LogPrintf("Force relaying tx %s (wtxid=%s) from peer=%d\n",
4593 tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
4594 RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
4595 }
4596 }
4597
4598 if (RecentRejectsReconsiderableFilter().contains(wtxid)) {
4599 // When a transaction is already in m_lazy_recent_rejects_reconsiderable, we shouldn't submit
4600 // it by itself again. However, look for a matching child in the orphanage, as it is
4601 // possible that they succeed as a package.
4602 LogPrint(BCLog::TXPACKAGES, "found tx %s (wtxid=%s) in reconsiderable rejects, looking for child in orphanage\n",
4603 txid.ToString(), wtxid.ToString());
4604 if (auto package_to_validate{Find1P1CPackage(ptx, pfrom.GetId())}) {
4605 const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
4606 LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
4607 package_result.m_state.IsValid() ? "package accepted" : "package rejected");
4608 ProcessPackageResult(package_to_validate.value(), package_result);
4609 }
4610 }
4611 // If a tx is detected by m_lazy_recent_rejects it is ignored. Because we haven't
4612 // submitted the tx to our mempool, we won't have computed a DoS
4613 // score for it or determined exactly why we consider it invalid.
4614 //
4615 // This means we won't penalize any peer subsequently relaying a DoSy
4616 // tx (even if we penalized the first peer who gave it to us) because
4617 // we have to account for m_lazy_recent_rejects showing false positives. In
4618 // other words, we shouldn't penalize a peer if we aren't *sure* they
4619 // submitted a DoSy tx.
4620 //
4621 // Note that m_lazy_recent_rejects doesn't just record DoSy or invalid
4622 // transactions, but any tx not accepted by the mempool, which may be
4623 // due to node policy (vs. consensus). So we can't blanket penalize a
4624 // peer simply for relaying a tx that our m_lazy_recent_rejects has caught,
4625 // regardless of false positives.
4626 return;
4627 }
4628
4629 const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
4630 const TxValidationState& state = result.m_state;
4631
// NOTE(review): the branch structure here is incomplete in this extraction:
// original lines 4632 and 4634/4636 (presumably the 'result valid' condition
// and an 'else if missing-inputs' condition) are missing — confirm upstream.
4633 ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions);
4635 }
4637 {
4638 bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected
4639
4640 // Deduplicate parent txids, so that we don't have to loop over
4641 // the same parent txid more than once down below.
4642 std::vector<uint256> unique_parents;
4643 unique_parents.reserve(tx.vin.size());
4644 for (const CTxIn& txin : tx.vin) {
4645 // We start with all parents, and then remove duplicates below.
4646 unique_parents.push_back(txin.prevout.hash);
4647 }
4648 std::sort(unique_parents.begin(), unique_parents.end());
4649 unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
4650
4651 // Distinguish between parents in m_lazy_recent_rejects and m_lazy_recent_rejects_reconsiderable.
4652 // We can tolerate having up to 1 parent in m_lazy_recent_rejects_reconsiderable since we
4653 // submit 1p1c packages. However, fail immediately if any are in m_lazy_recent_rejects.
4654 std::optional<uint256> rejected_parent_reconsiderable;
4655 for (const uint256& parent_txid : unique_parents) {
4656 if (RecentRejectsFilter().contains(parent_txid)) {
4657 fRejectedParents = true;
4658 break;
4659 } else if (RecentRejectsReconsiderableFilter().contains(parent_txid) && !m_mempool.exists(GenTxid::Txid(parent_txid))) {
4660 // More than 1 parent in m_lazy_recent_rejects_reconsiderable: 1p1c will not be
4661 // sufficient to accept this package, so just give up here.
4662 if (rejected_parent_reconsiderable.has_value()) {
4663 fRejectedParents = true;
4664 break;
4665 }
4666 rejected_parent_reconsiderable = parent_txid;
4667 }
4668 }
4669 if (!fRejectedParents) {
4670 const auto current_time{GetTime<std::chrono::microseconds>()};
4671
4672 for (const uint256& parent_txid : unique_parents) {
4673 // Here, we only have the txid (and not wtxid) of the
4674 // inputs, so we only request in txid mode, even for
4675 // wtxidrelay peers.
4676 // Eventually we should replace this with an improved
4677 // protocol for getting all unconfirmed parents.
4678 const auto gtxid{GenTxid::Txid(parent_txid)};
4679 AddKnownTx(*peer, parent_txid);
4680 // Exclude m_lazy_recent_rejects_reconsiderable: the missing parent may have been
4681 // previously rejected for being too low feerate. This orphan might CPFP it.
4682 if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) AddTxAnnouncement(pfrom, gtxid, current_time);
4683 }
4684
4685 if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
4686 AddToCompactExtraTransactions(ptx);
4687 }
4688
4689 // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore.
4690 m_txrequest.ForgetTxHash(tx.GetHash());
4691 m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4692
4693 // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
4694 m_orphanage.LimitOrphans(m_opts.max_orphan_txs, m_rng);
4695 } else {
4696 LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s (wtxid=%s)\n",
4697 tx.GetHash().ToString(),
4698 tx.GetWitnessHash().ToString());
4699 // We will continue to reject this tx since it has rejected
4700 // parents so avoid re-requesting it from other peers.
4701 // Here we add both the txid and the wtxid, as we know that
4702 // regardless of what witness is provided, we will not accept
4703 // this, so we don't need to allow for redownload of this txid
4704 // from any of our non-wtxidrelay peers.
4705 RecentRejectsFilter().insert(tx.GetHash().ToUint256());
4706 RecentRejectsFilter().insert(tx.GetWitnessHash().ToUint256());
4707 m_txrequest.ForgetTxHash(tx.GetHash());
4708 m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4709 }
4710 }
4711 if (state.IsInvalid()) {
4712 ProcessInvalidTx(pfrom.GetId(), ptx, state, /*maybe_add_extra_compact_tx=*/true);
4713 }
4714 // When a transaction fails for TX_RECONSIDERABLE, look for a matching child in the
4715 // orphanage, as it is possible that they succeed as a package.
// NOTE(review): the opening condition of this branch (original line 4716,
// presumably checking TxValidationResult::TX_RECONSIDERABLE) appears to
// have been dropped by this extraction — confirm against upstream.
4717 LogPrint(BCLog::TXPACKAGES, "tx %s (wtxid=%s) failed but reconsiderable, looking for child in orphanage\n",
4718 txid.ToString(), wtxid.ToString());
4719 if (auto package_to_validate{Find1P1CPackage(ptx, pfrom.GetId())}) {
4720 const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)};
4721 LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(),
4722 package_result.m_state.IsValid() ? "package accepted" : "package rejected");
4723 ProcessPackageResult(package_to_validate.value(), package_result);
4724 }
4725 }
4726
4727 return;
4728 }
4729
4730 if (msg_type == NetMsgType::CMPCTBLOCK)
4731 {
// BIP 152 compact block: validate the header, then try to reconstruct the
// block from our mempool, requesting missing transactions via getblocktxn
// if needed.
4732 // Ignore cmpctblock received while importing
4733 if (m_chainman.m_blockman.LoadingBlocks()) {
4734 LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
4735 return;
4736 }
4737
4738 CBlockHeaderAndShortTxIDs cmpctblock;
4739 vRecv >> cmpctblock;
4740
4741 bool received_new_header = false;
4742 const auto blockhash = cmpctblock.header.GetHash();
4743
4744 {
4745 LOCK(cs_main);
4746
4747 const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock);
4748 if (!prev_block) {
4749 // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
4750 if (!m_chainman.IsInitialBlockDownload()) {
4751 MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer);
4752 }
4753 return;
4754 } else if (prev_block->nChainWork + CalculateClaimedHeadersWork({cmpctblock.header}) < GetAntiDoSWorkThreshold()) {
4755 // If we get a low-work header in a compact block, we can ignore it.
4756 LogPrint(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId());
4757 return;
4758 }
4759
4760 if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) {
4761 received_new_header = true;
4762 }
4763 }
4764
4765 const CBlockIndex *pindex = nullptr;
// NOTE(review): the declaration of 'state' (original line 4766, presumably
// 'BlockValidationState state;') appears to have been dropped by this
// extraction — confirm against upstream.
4767 if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, /*min_pow_checked=*/true, state, &pindex)) {
4768 if (state.IsInvalid()) {
4769 MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock");
4770 return;
4771 }
4772 }
4773
4774 if (received_new_header) {
4775 LogInfo("Saw new cmpctblock header hash=%s peer=%d\n",
4776 blockhash.ToString(), pfrom.GetId());
4777 }
4778
4779 bool fProcessBLOCKTXN = false;
4780
4781 // If we end up treating this as a plain headers message, call that as well
4782 // without cs_main.
4783 bool fRevertToHeaderProcessing = false;
4784
4785 // Keep a CBlock for "optimistic" compactblock reconstructions (see
4786 // below)
4787 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4788 bool fBlockReconstructed = false;
4789
4790 {
4791 LOCK(cs_main);
4792 // If AcceptBlockHeader returned true, it set pindex
4793 assert(pindex);
4794 UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
4795
4796 CNodeState *nodestate = State(pfrom.GetId());
4797
4798 // If this was a new header with more work than our tip, update the
4799 // peer's last block announcement time
4800 if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
4801 nodestate->m_last_block_announcement = GetTime();
4802 }
4803
4804 if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
4805 return;
4806
4807 auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash());
4808 size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
4809 bool requested_block_from_this_peer{false};
4810
4811 // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
4812 bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());
4813
4814 while (range_flight.first != range_flight.second) {
4815 if (range_flight.first->second.first == pfrom.GetId()) {
4816 requested_block_from_this_peer = true;
4817 break;
4818 }
4819 range_flight.first++;
4820 }
4821
4822 if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better
4823 pindex->nTx != 0) { // We had this block at some point, but pruned it
4824 if (requested_block_from_this_peer) {
4825 // We requested this block for some reason, but our mempool will probably be useless
4826 // so we just grab the block via normal getdata
4827 std::vector<CInv> vInv(1);
4828 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4829 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
4830 }
4831 return;
4832 }
4833
4834 // If we're not close to tip yet, give up and let parallel block fetch work its magic
4835 if (!already_in_flight && !CanDirectFetch()) {
4836 return;
4837 }
4838
4839 // We want to be a bit conservative just to be extra careful about DoS
4840 // possibilities in compact block processing...
4841 if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
4842 if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
4843 requested_block_from_this_peer) {
4844 std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
4845 if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) {
4846 if (!(*queuedBlockIt)->partialBlock)
4847 (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
4848 else {
4849 // The block was already in flight using compact blocks from the same peer
4850 LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
4851 return;
4852 }
4853 }
4854
4855 PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
4856 ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
4857 if (status == READ_STATUS_INVALID) {
4858 RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
4859 Misbehaving(*peer, "invalid compact block");
4860 return;
4861 } else if (status == READ_STATUS_FAILED) {
4862 if (first_in_flight) {
4863 // Duplicate txindexes, the block is now in-flight, so just request it
4864 std::vector<CInv> vInv(1);
4865 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4866 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
4867 } else {
4868 // Give up for this peer and wait for other peer(s)
4869 RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
4870 }
4871 return;
4872 }
4873
// NOTE(review): the declaration of 'req' (original line 4874, presumably
// 'BlockTransactionsRequest req;') appears to have been dropped by this
// extraction — confirm against upstream.
4875 for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
4876 if (!partialBlock.IsTxAvailable(i))
4877 req.indexes.push_back(i);
4878 }
4879 if (req.indexes.empty()) {
4880 fProcessBLOCKTXN = true;
4881 } else if (first_in_flight) {
4882 // We will try to round-trip any compact blocks we get on failure,
4883 // as long as it's first...
4884 req.blockhash = pindex->GetBlockHash();
4885 MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
4886 } else if (pfrom.m_bip152_highbandwidth_to &&
4887 (!pfrom.IsInboundConn() ||
4888 IsBlockRequestedFromOutbound(blockhash) ||
4889 already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) {
4890 // ... or it's a hb relay peer and:
4891 // - peer is outbound, or
4892 // - we already have an outbound attempt in flight(so we'll take what we can get), or
4893 // - it's not the final parallel download slot (which we may reserve for first outbound)
4894 req.blockhash = pindex->GetBlockHash();
4895 MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
4896 } else {
4897 // Give up for this peer and wait for other peer(s)
4898 RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
4899 }
4900 } else {
4901 // This block is either already in flight from a different
4902 // peer, or this peer has too many blocks outstanding to
4903 // download from.
4904 // Optimistically try to reconstruct anyway since we might be
4905 // able to without any round trips.
4906 PartiallyDownloadedBlock tempBlock(&m_mempool);
4907 ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
4908 if (status != READ_STATUS_OK) {
4909 // TODO: don't ignore failures
4910 return;
4911 }
4912 std::vector<CTransactionRef> dummy;
4913 status = tempBlock.FillBlock(*pblock, dummy);
4914 if (status == READ_STATUS_OK) {
4915 fBlockReconstructed = true;
4916 }
4917 }
4918 } else {
4919 if (requested_block_from_this_peer) {
4920 // We requested this block, but its far into the future, so our
4921 // mempool will probably be useless - request the block normally
4922 std::vector<CInv> vInv(1);
4923 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4924 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
4925 return;
4926 } else {
4927 // If this was an announce-cmpctblock, we want the same treatment as a header message
4928 fRevertToHeaderProcessing = true;
4929 }
4930 }
4931 } // cs_main
4932
4933 if (fProcessBLOCKTXN) {
// All transactions were available locally; process as if the peer had
// sent us an (empty) blocktxn response.
// NOTE(review): the declaration of 'txn' (original line 4934, presumably
// 'BlockTransactions txn;') appears to have been dropped by this
// extraction — confirm against upstream.
4935 txn.blockhash = blockhash;
4936 return ProcessCompactBlockTxns(pfrom, *peer, txn);
4937 }
4938
4939 if (fRevertToHeaderProcessing) {
4940 // Headers received from HB compact block peers are permitted to be
4941 // relayed before full validation (see BIP 152), so we don't want to disconnect
4942 // the peer if the header turns out to be for an invalid block.
4943 // Note that if a peer tries to build on an invalid chain, that
4944 // will be detected and the peer will be disconnected/discouraged.
4945 return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, /*via_compact_block=*/true);
4946 }
4947
4948 if (fBlockReconstructed) {
4949 // If we got here, we were able to optimistically reconstruct a
4950 // block that is in flight from some other peer.
4951 {
4952 LOCK(cs_main);
4953 mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
4954 }
4955 // Setting force_processing to true means that we bypass some of
4956 // our anti-DoS protections in AcceptBlock, which filters
4957 // unrequested blocks that might be trying to waste our resources
4958 // (eg disk space). Because we only try to reconstruct blocks when
4959 // we're close to caught up (via the CanDirectFetch() requirement
4960 // above, combined with the behavior of not requesting blocks until
4961 // we have a chain with at least the minimum chain work), and we ignore
4962 // compact blocks with less work than our tip, it is safe to treat
4963 // reconstructed compact blocks as having been requested.
4964 ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
4965 LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
4966 if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
4967 // Clear download state for this block, which is in
4968 // process from some other peer. We do this after calling
4969 // ProcessNewBlock so that a malleated cmpctblock announcement
4970 // can't be used to interfere with block relay.
4971 RemoveBlockRequest(pblock->GetHash(), std::nullopt);
4972 }
4973 }
4974 return;
4975 }
4976
4977 if (msg_type == NetMsgType::BLOCKTXN)
4978 {
// BIP 152 blocktxn: the transactions we requested via getblocktxn; hand
// off to the shared compact-block reconstruction path.
4979 // Ignore blocktxn received while importing
4980 if (m_chainman.m_blockman.LoadingBlocks()) {
4981 LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
4982 return;
4983 }
4984
4985 BlockTransactions resp;
4986 vRecv >> resp;
4987
4988 return ProcessCompactBlockTxns(pfrom, *peer, resp);
4989 }
4990
4991 if (msg_type == NetMsgType::HEADERS)
4992 {
// Deserialize up to MAX_HEADERS_RESULTS block headers and feed them to the
// headers-sync / validation machinery.
4993 // Ignore headers received while importing
4994 if (m_chainman.m_blockman.LoadingBlocks()) {
4995 LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
4996 return;
4997 }
4998
4999 std::vector<CBlockHeader> headers;
5000
5001 // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
5002 unsigned int nCount = ReadCompactSize(vRecv);
5003 if (nCount > MAX_HEADERS_RESULTS) {
5004 Misbehaving(*peer, strprintf("headers message size = %u", nCount));
5005 return;
5006 }
5007 headers.resize(nCount);
5008 for (unsigned int n = 0; n < nCount; n++) {
5009 vRecv >> headers[n];
5010 ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
5011 }
5012
5013 ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false);
5014
5015 // Check if the headers presync progress needs to be reported to validation.
5016 // This needs to be done without holding the m_headers_presync_mutex lock.
5017 if (m_headers_presync_should_signal.exchange(false)) {
5018 HeadersPresyncStats stats;
5019 {
5020 LOCK(m_headers_presync_mutex);
5021 auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
5022 if (it != m_headers_presync_stats.end()) stats = it->second;
5023 }
5024 if (stats.second) {
5025 m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second);
5026 }
5027 }
5028
5029 return;
5030 }
5031
5032 if (msg_type == NetMsgType::BLOCK)
5033 {
// A full block: check for witness mutation, update in-flight bookkeeping,
// then hand to validation via ProcessBlock.
5034 // Ignore block received while importing
5035 if (m_chainman.m_blockman.LoadingBlocks()) {
5036 LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
5037 return;
5038 }
5039
5040 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
5041 vRecv >> TX_WITH_WITNESS(*pblock);
5042
5043 LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());
5044
5045 const CBlockIndex* prev_block{WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock))};
5046
5047 // Check for possible mutation if it connects to something we know so we can check for DEPLOYMENT_SEGWIT being active
5048 if (prev_block && IsBlockMutated(/*block=*/*pblock,
5049 /*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) {
5050 LogDebug(BCLog::NET, "Received mutated block from peer=%d\n", peer->m_id);
5051 Misbehaving(*peer, "mutated block");
5052 WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer->m_id));
5053 return;
5054 }
5055
5056 bool forceProcessing = false;
5057 const uint256 hash(pblock->GetHash());
5058 bool min_pow_checked = false;
5059 {
5060 LOCK(cs_main);
5061 // Always process the block if we requested it, since we may
5062 // need it even when it's not a candidate for a new best tip.
5063 forceProcessing = IsBlockRequested(hash);
5064 RemoveBlockRequest(hash, pfrom.GetId());
5065 // mapBlockSource is only used for punishing peers and setting
5066 // which peers send us compact blocks, so the race between here and
5067 // cs_main in ProcessNewBlock is fine.
5068 mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
5069
5070 // Check claimed work on this block against our anti-dos thresholds.
5071 if (prev_block && prev_block->nChainWork + CalculateClaimedHeadersWork({pblock->GetBlockHeader()}) >= GetAntiDoSWorkThreshold()) {
5072 min_pow_checked = true;
5073 }
5074 }
5075 ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked);
5076 return;
5077 }
5078
5079 if (msg_type == NetMsgType::GETADDR) {
// Respond (once per connection, inbound peers only) with a sample of
// addresses from AddrMan / the connection manager.
5080 // This asymmetric behavior for inbound and outbound connections was introduced
5081 // to prevent a fingerprinting attack: an attacker can send specific fake addresses
5082 // to users' AddrMan and later request them by sending getaddr messages.
5083 // Making nodes which are behind NAT and can only make outgoing connections ignore
5084 // the getaddr message mitigates the attack.
5085 if (!pfrom.IsInboundConn()) {
5086 LogPrint(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
5087 return;
5088 }
5089
5090 // Since this must be an inbound connection, SetupAddressRelay will
5091 // never fail.
5092 Assume(SetupAddressRelay(pfrom, *peer));
5093
5094 // Only send one GetAddr response per connection to reduce resource waste
5095 // and discourage addr stamping of INV announcements.
5096 if (peer->m_getaddr_recvd) {
5097 LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
5098 return;
5099 }
5100 peer->m_getaddr_recvd = true;
5101
5102 peer->m_addrs_to_send.clear();
5103 std::vector<CAddress> vAddr;
// NOTE(review): the condition selecting between these two GetAddresses
// overloads (original line 5104) appears to have been dropped by this
// extraction — confirm against upstream.
5105 vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt);
5106 } else {
5107 vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
5108 }
5109 for (const CAddress &addr : vAddr) {
5110 PushAddress(*peer, addr);
5111 }
5112 return;
5113 }
5114
5115 if (msg_type == NetMsgType::MEMPOOL) {
// BIP 35 mempool request: schedule our mempool contents for inv relay to
// this peer, subject to NODE_BLOOM / permission / bandwidth checks.
5116 // Only process received mempool messages if we advertise NODE_BLOOM
5117 // or if the peer has mempool permissions.
5118 if (!(peer->m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
5119 {
// NOTE(review): an inner condition (original line 5120) guarding this
// disconnect appears to have been dropped by this extraction — confirm
// against upstream.
5121 {
5122 LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId());
5123 pfrom.fDisconnect = true;
5124 }
5125 return;
5126 }
5127
5128 if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
5129 {
// NOTE(review): an inner condition (original line 5130) guarding this
// disconnect appears to have been dropped by this extraction — confirm
// against upstream.
5131 {
5132 LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId());
5133 pfrom.fDisconnect = true;
5134 }
5135 return;
5136 }
5137
// Flag the peer's tx-relay state; the actual inv batch is assembled later
// by the send path.
5138 if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
5139 LOCK(tx_relay->m_tx_inventory_mutex);
5140 tx_relay->m_send_mempool = true;
5141 }
5142 return;
5143 }
5144
5145 if (msg_type == NetMsgType::PING) {
5146 if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
5147 uint64_t nonce = 0;
5148 vRecv >> nonce;
5149 // Echo the message back with the nonce. This allows for two useful features:
5150 //
5151 // 1) A remote node can quickly check if the connection is operational
5152 // 2) Remote nodes can measure the latency of the network thread. If this node
5153 // is overloaded it won't respond to pings quickly and the remote node can
5154 // avoid sending us more work, like chain download requests.
5155 //
5156 // The nonce stops the remote getting confused between different pings: without
5157 // it, if the remote node sends a ping once per second and this node takes 5
5158 // seconds to respond to each, the 5th ping the remote sends would appear to
5159 // return very quickly.
5160 MakeAndPushMessage(pfrom, NetMsgType::PONG, nonce);
5161 }
5162 return;
5163 }
5164
5165 if (msg_type == NetMsgType::PONG) {
5166 const auto ping_end = time_received;
5167 uint64_t nonce = 0;
5168 size_t nAvail = vRecv.in_avail();
5169 bool bPingFinished = false;
5170 std::string sProblem;
5171
5172 if (nAvail >= sizeof(nonce)) {
5173 vRecv >> nonce;
5174
5175 // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
5176 if (peer->m_ping_nonce_sent != 0) {
5177 if (nonce == peer->m_ping_nonce_sent) {
5178 // Matching pong received, this ping is no longer outstanding
5179 bPingFinished = true;
5180 const auto ping_time = ping_end - peer->m_ping_start.load();
5181 if (ping_time.count() >= 0) {
5182 // Let connman know about this successful ping-pong
5183 pfrom.PongReceived(ping_time);
5184 } else {
5185 // This should never happen
5186 sProblem = "Timing mishap";
5187 }
5188 } else {
5189 // Nonce mismatches are normal when pings are overlapping
5190 sProblem = "Nonce mismatch";
5191 if (nonce == 0) {
5192 // This is most likely a bug in another implementation somewhere; cancel this ping
5193 bPingFinished = true;
5194 sProblem = "Nonce zero";
5195 }
5196 }
5197 } else {
5198 sProblem = "Unsolicited pong without ping";
5199 }
5200 } else {
5201 // This is most likely a bug in another implementation somewhere; cancel this ping
5202 bPingFinished = true;
5203 sProblem = "Short payload";
5204 }
5205
5206 if (!(sProblem.empty())) {
5207 LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
5208 pfrom.GetId(),
5209 sProblem,
5210 peer->m_ping_nonce_sent,
5211 nonce,
5212 nAvail);
5213 }
5214 if (bPingFinished) {
5215 peer->m_ping_nonce_sent = 0;
5216 }
5217 return;
5218 }
5219
5220 if (msg_type == NetMsgType::FILTERLOAD) {
5221 if (!(peer->m_our_services & NODE_BLOOM)) {
5222 LogPrint(BCLog::NET, "filterload received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
5223 pfrom.fDisconnect = true;
5224 return;
5225 }
5226 CBloomFilter filter;
5227 vRecv >> filter;
5228
5229 if (!filter.IsWithinSizeConstraints())
5230 {
5231 // There is no excuse for sending a too-large filter
5232 Misbehaving(*peer, "too-large bloom filter");
5233 } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
5234 {
5235 LOCK(tx_relay->m_bloom_filter_mutex);
5236 tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
5237 tx_relay->m_relay_txs = true;
5238 }
5239 pfrom.m_bloom_filter_loaded = true;
5240 pfrom.m_relays_txs = true;
5241 }
5242 return;
5243 }
5244
5245 if (msg_type == NetMsgType::FILTERADD) {
5246 if (!(peer->m_our_services & NODE_BLOOM)) {
5247 LogPrint(BCLog::NET, "filteradd received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
5248 pfrom.fDisconnect = true;
5249 return;
5250 }
5251 std::vector<unsigned char> vData;
5252 vRecv >> vData;
5253
5254 // Nodes must NEVER send a data item > MAX_SCRIPT_ELEMENT_SIZE bytes (the max size for a script data object,
5255 // and thus, the maximum size any matched object can have) in a filteradd message
5256 bool bad = false;
5257 if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
5258 bad = true;
5259 } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
5260 LOCK(tx_relay->m_bloom_filter_mutex);
5261 if (tx_relay->m_bloom_filter) {
5262 tx_relay->m_bloom_filter->insert(vData);
5263 } else {
5264 bad = true;
5265 }
5266 }
5267 if (bad) {
5268 Misbehaving(*peer, "bad filteradd message");
5269 }
5270 return;
5271 }
5272
5273 if (msg_type == NetMsgType::FILTERCLEAR) {
5274 if (!(peer->m_our_services & NODE_BLOOM)) {
5275 LogPrint(BCLog::NET, "filterclear received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
5276 pfrom.fDisconnect = true;
5277 return;
5278 }
5279 auto tx_relay = peer->GetTxRelay();
5280 if (!tx_relay) return;
5281
5282 {
5283 LOCK(tx_relay->m_bloom_filter_mutex);
5284 tx_relay->m_bloom_filter = nullptr;
5285 tx_relay->m_relay_txs = true;
5286 }
5287 pfrom.m_bloom_filter_loaded = false;
5288 pfrom.m_relays_txs = true;
5289 return;
5290 }
5291
5292 if (msg_type == NetMsgType::FEEFILTER) {
5293 CAmount newFeeFilter = 0;
5294 vRecv >> newFeeFilter;
5295 if (MoneyRange(newFeeFilter)) {
5296 if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
5297 tx_relay->m_fee_filter_received = newFeeFilter;
5298 }
5299 LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
5300 }
5301 return;
5302 }
5303
5304 if (msg_type == NetMsgType::GETCFILTERS) {
5305 ProcessGetCFilters(pfrom, *peer, vRecv);
5306 return;
5307 }
5308
5309 if (msg_type == NetMsgType::GETCFHEADERS) {
5310 ProcessGetCFHeaders(pfrom, *peer, vRecv);
5311 return;
5312 }
5313
5314 if (msg_type == NetMsgType::GETCFCHECKPT) {
5315 ProcessGetCFCheckPt(pfrom, *peer, vRecv);
5316 return;
5317 }
5318
5319 if (msg_type == NetMsgType::NOTFOUND) {
5320 std::vector<CInv> vInv;
5321 vRecv >> vInv;
5323 LOCK(m_tx_download_mutex);
5324 for (CInv &inv : vInv) {
5325 if (inv.IsGenTxMsg()) {
5326 // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as
5327 // completed in TxRequestTracker.
5328 m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash);
5329 }
5330 }
5331 }
5332 return;
5333 }
5334
5335 // Ignore unknown commands for extensibility
5336 LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
5337 return;
5338}
5339
5340bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
5341{
5342 {
5343 LOCK(peer.m_misbehavior_mutex);
5344
5345 // There's nothing to do if the m_should_discourage flag isn't set
5346 if (!peer.m_should_discourage) return false;
5347
5348 peer.m_should_discourage = false;
5349 } // peer.m_misbehavior_mutex
5350
5352 // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission
5353 LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
5354 return false;
5355 }
5356
5357 if (pnode.IsManualConn()) {
5358 // We never disconnect or discourage manual peers for bad behavior
5359 LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id);
5360 return false;
5361 }
5362
5363 if (pnode.addr.IsLocal()) {
5364 // We disconnect local peers for bad behavior but don't discourage (since that would discourage
5365 // all peers on the same local address)
5366 LogPrint(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n",
5367 pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
5368 pnode.fDisconnect = true;
5369 return true;
5370 }
5371
5372 // Normal case: Disconnect the peer and discourage all nodes sharing the address
5373 LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id);
5374 if (m_banman) m_banman->Discourage(pnode.addr);
5375 m_connman.DisconnectNode(pnode.addr);
5376 return true;
5377}
5378
/** Top-level message-pump entry point for one peer: flush any pending getdata
 *  responses, reconsider orphan transactions, then poll and dispatch at most
 *  one new network message. Returns true if there is (possibly) more work to
 *  do for this peer, so the caller schedules another pass. */
bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
{
    AssertLockNotHeld(m_tx_download_mutex);
    AssertLockHeld(g_msgproc_mutex);

    PeerRef peer = GetPeerRef(pfrom->GetId());
    if (peer == nullptr) return false;

    // For outbound connections, ensure that the initial VERSION message
    // has been sent first before processing any incoming messages
    if (!pfrom->IsInboundConn() && !peer->m_outbound_version_message_sent) return false;

    // Serve any queued getdata requests before taking on new work.
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) {
            ProcessGetData(*pfrom, *peer, interruptMsgProc);
        }
    }

    // Reconsider orphan transactions that may have become acceptable.
    const bool processed_orphan = ProcessOrphanTx(*peer);

    if (pfrom->fDisconnect)
        return false;

    // An orphan was handled this pass; signal more work without reading a
    // new message yet.
    if (processed_orphan) return true;

    // this maintains the order of responses
    // and prevents m_getdata_requests to grow unbounded
    {
        LOCK(peer->m_getdata_requests_mutex);
        if (!peer->m_getdata_requests.empty()) return true;
    }

    // Don't bother if send buffer is too full to respond anyway
    if (pfrom->fPauseSend) return false;

    auto poll_result{pfrom->PollMessage()};
    if (!poll_result) {
        // No message to process
        return false;
    }

    CNetMessage& msg{poll_result->first};
    bool fMoreWork = poll_result->second;

    TRACE6(net, inbound_message,
        pfrom->GetId(),
        pfrom->m_addr_name.c_str(),
        pfrom->ConnectionTypeAsString().c_str(),
        msg.m_type.c_str(),
        msg.m_recv.size(),
        msg.m_recv.data()
    );

    if (m_opts.capture_messages) {
        CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
    }

    try {
        ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
        if (interruptMsgProc) return false;
        // Processing the message may have queued getdata responses.
        {
            LOCK(peer->m_getdata_requests_mutex);
            if (!peer->m_getdata_requests.empty()) fMoreWork = true;
        }
        // Does this peer have an orphan ready to reconsider?
        // (Note: we may have provided a parent for an orphan provided
        // by another peer that was already processed; in that case,
        // the extra work may not be noticed, possibly resulting in an
        // unnecessary 100ms delay)
        LOCK(m_tx_download_mutex);
        if (m_orphanage.HaveTxToReconsider(peer->m_id)) fMoreWork = true;
    } catch (const std::exception& e) {
        // A malformed message must never take the node down; log and move on.
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
    } catch (...) {
        LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size);
    }

    return fMoreWork;
}
5459
5460void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
5461{
5463
5464 CNodeState &state = *State(pto.GetId());
5465
5466 if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
5467 // This is an outbound peer subject to disconnection if they don't
5468 // announce a block with as much work as the current tip within
5469 // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
5470 // their chain has more work than ours, we should sync to it,
5471 // unless it's invalid, in which case we should find that out and
5472 // disconnect from them elsewhere).
5473 if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) {
5474 // The outbound peer has sent us a block with at least as much work as our current tip, so reset the timeout if it was set
5475 if (state.m_chain_sync.m_timeout != 0s) {
5476 state.m_chain_sync.m_timeout = 0s;
5477 state.m_chain_sync.m_work_header = nullptr;
5478 state.m_chain_sync.m_sent_getheaders = false;
5479 }
5480 } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
5481 // At this point we know that the outbound peer has either never sent us a block/header or they have, but its tip is behind ours
5482 // AND
5483 // we are noticing this for the first time (m_timeout is 0)
5484 // OR we noticed this at some point within the last CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds and set a timeout
5485 // for them, they caught up to our tip at the time of setting the timer but not to our current one (we've also advanced).
5486 // Either way, set a new timeout based on our current tip.
5487 state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
5488 state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
5489 state.m_chain_sync.m_sent_getheaders = false;
5490 } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) {
5491 // No evidence yet that our peer has synced to a chain with work equal to that
5492 // of our tip, when we first detected it was behind. Send a single getheaders
5493 // message to give the peer a chance to update us.
5494 if (state.m_chain_sync.m_sent_getheaders) {
5495 // They've run out of time to catch up!
5496 LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
5497 pto.fDisconnect = true;
5498 } else {
5499 assert(state.m_chain_sync.m_work_header);
5500 // Here, we assume that the getheaders message goes out,
5501 // because it'll either go out or be skipped because of a
5502 // getheaders in-flight already, in which case the peer should
5503 // still respond to us with a sufficiently high work chain tip.
5504 MaybeSendGetHeaders(pto,
5505 GetLocator(state.m_chain_sync.m_work_header->pprev),
5506 peer);
5507 LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
5508 state.m_chain_sync.m_sent_getheaders = true;
5509 // Bump the timeout to allow a response, which could clear the timeout
5510 // (if the response shows the peer has synced), reset the timeout (if
5511 // the peer syncs to the required work but not to our tip), or result
5512 // in disconnect (if we advance to the timeout and pindexBestKnownBlock
5513 // has not sufficiently progressed)
5514 state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
5515 }
5516 }
5517 }
5518}
5519
/** If we have more outbound connections than our targets allow, pick a peer
 *  to evict: an extra block-relay-only peer (youngest-vs-second-youngest by
 *  last block received), or the outbound-full-relay peer that least recently
 *  announced a new block.
 *
 *  @param[in] now  Current (mockable) time, used against MINIMUM_CONNECT_TIME.
 */
void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now)
{
    // If we have any extra block-relay-only peers, disconnect the youngest unless
    // it's given us a block -- in which case, compare with the second-youngest, and
    // out of those two, disconnect the peer who least recently gave us a block.
    // The youngest block-relay-only peer would be the extra peer we connected
    // to temporarily in order to sync our tip; see net.cpp.
    // Note that we use higher nodeid as a measure for most recent connection.
    if (m_connman.GetExtraBlockRelayCount() > 0) {
        // {node id, last block time} for the two most recent block-relay-only peers.
        std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};

        m_connman.ForEachNode([&](CNode* pnode) {
            // Only live block-relay-only connections are candidates.
            if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return;
            if (pnode->GetId() > youngest_peer.first) {
                next_youngest_peer = youngest_peer;
                youngest_peer.first = pnode->GetId();
                youngest_peer.second = pnode->m_last_block_time;
            }
        });
        NodeId to_disconnect = youngest_peer.first;
        if (youngest_peer.second > next_youngest_peer.second) {
            // Our newest block-relay-only peer gave us a block more recently;
            // disconnect our second youngest.
            to_disconnect = next_youngest_peer.first;
        }
        m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
            // Make sure we're not getting a block right now, and that
            // we've been connected long enough for this eviction to happen
            // at all.
            // Note that we only request blocks from a peer if we learn of a
            // valid headers chain with at least as much work as our tip.
            CNodeState *node_state = State(pnode->GetId());
            if (node_state == nullptr ||
                (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) {
                pnode->fDisconnect = true;
                LogPrint(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n",
                         pnode->GetId(), count_seconds(pnode->m_last_block_time));
                return true;
            } else {
                LogPrint(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
                         pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size());
            }
            return false;
        });
    }

    // Check whether we have too many outbound-full-relay peers
    if (m_connman.GetExtraFullOutboundCount() > 0) {
        // If we have more outbound-full-relay peers than we target, disconnect one.
        // Pick the outbound-full-relay peer that least recently announced
        // us a new block, with ties broken by choosing the more recent
        // connection (higher node id)
        // Protect peers from eviction if we don't have another connection
        // to their network, counting both outbound-full-relay and manual peers.
        NodeId worst_peer = -1;
        int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();

        m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) {
            AssertLockHeld(::cs_main);

            // Only consider outbound-full-relay peers that are not already
            // marked for disconnection
            if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
            CNodeState *state = State(pnode->GetId());
            if (state == nullptr) return; // shouldn't be possible, but just in case
            // Don't evict our protected peers
            if (state->m_chain_sync.m_protect) return;
            // If this is the only connection on a particular network that is
            // OUTBOUND_FULL_RELAY or MANUAL, protect it.
            if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return;
            if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
                worst_peer = pnode->GetId();
                oldest_block_announcement = state->m_last_block_announcement;
            }
        });
        if (worst_peer != -1) {
            bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {

                // Only disconnect a peer that has been connected to us for
                // some reasonable fraction of our check-frequency, to give
                // it time for new information to have arrived.
                // Also don't disconnect any peer we're trying to download a
                // block from.
                CNodeState &state = *State(pnode->GetId());
                if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) {
                    LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
                    pnode->fDisconnect = true;
                    return true;
                } else {
                    LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
                             pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size());
                    return false;
                }
            });
            if (disconnected) {
                // If we disconnected an extra peer, that means we successfully
                // connected to at least one peer after the last time we
                // detected a stale tip. Don't try any more extra peers until
                // we next detect a stale tip, to limit the load we put on the
                // network from these extra connections.
                m_connman.SetTryNewOutboundPeer(false);
            }
        }
    }
}
5627
5628void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
5629{
5630 LOCK(cs_main);
5631
5633
5634 EvictExtraOutboundPeers(now);
5635
5636 if (now > m_stale_tip_check_time) {
5637 // Check whether our tip is stale, and if so, allow using an extra
5638 // outbound peer
5639 if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
5640 LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
5641 count_seconds(now - m_last_tip_update.load()));
5642 m_connman.SetTryNewOutboundPeer(true);
5643 } else if (m_connman.GetTryNewOutboundPeer()) {
5644 m_connman.SetTryNewOutboundPeer(false);
5645 }
5646 m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
5647 }
5648
5649 if (!m_initial_sync_finished && CanDirectFetch()) {
5650 m_connman.StartExtraBlockRelayPeers();
5651 m_initial_sync_finished = true;
5652 }
5653}
5654
5655void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
5656{
5657 if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
5658 peer.m_ping_nonce_sent &&
5659 now > peer.m_ping_start.load() + TIMEOUT_INTERVAL)
5660 {
5661 // The ping timeout is using mocktime. To disable the check during
5662 // testing, increase -peertimeout.
5663 LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id);
5664 node_to.fDisconnect = true;
5665 return;
5666 }
5667
5668 bool pingSend = false;
5669
5670 if (peer.m_ping_queued) {
5671 // RPC ping request by user
5672 pingSend = true;
5673 }
5674
5675 if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) {
5676 // Ping automatically sent as a latency probe & keepalive.
5677 pingSend = true;
5678 }
5679
5680 if (pingSend) {
5681 uint64_t nonce;
5682 do {
5684 } while (nonce == 0);
5685 peer.m_ping_queued = false;
5686 peer.m_ping_start = now;
5687 if (node_to.GetCommonVersion() > BIP0031_VERSION) {
5688 peer.m_ping_nonce_sent = nonce;
5689 MakeAndPushMessage(node_to, NetMsgType::PING, nonce);
5690 } else {
5691 // Peer is too old to support ping command with nonce, pong will never arrive.
5692 peer.m_ping_nonce_sent = 0;
5693 MakeAndPushMessage(node_to, NetMsgType::PING);
5694 }
5695 }
5696}
5697
/** Flush this peer's pending addr announcements, and periodically re-announce
 *  our own local address. No-op for peers without addr relay enabled.
 *
 *  @param[in] node          The node to send to.
 *  @param[in] peer          The corresponding Peer object (addr queues/filters).
 *  @param[in] current_time  Current (mockable) time for the send schedules.
 */
void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
{
    // Nothing to do for non-address-relay peers
    if (!peer.m_addr_relay_enabled) return;

    LOCK(peer.m_addr_send_times_mutex);
    // Periodically advertise our local address to the peer.
    if (fListen && !m_chainman.IsInitialBlockDownload() &&
        peer.m_next_local_addr_send < current_time) {
        // If we've sent before, clear the bloom filter for the peer, so that our
        // self-announcement will actually go out.
        // This might be unnecessary if the bloom filter has already rolled
        // over since our last self-announcement, but there is only a small
        // bandwidth cost that we can incur by doing this (which happens
        // once a day on average).
        if (peer.m_next_local_addr_send != 0us) {
            peer.m_addr_known->reset();
        }
        if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
            CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()};
            PushAddress(peer, local_addr);
        }
        // Randomized (exponential) interval to make timing analysis harder.
        peer.m_next_local_addr_send = current_time + m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
    }

    // We sent an `addr` message to this peer recently. Nothing more to do.
    if (current_time <= peer.m_next_addr_send) return;

    peer.m_next_addr_send = current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL);

    if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
        // Should be impossible since we always check size before adding to
        // m_addrs_to_send. Recover by trimming the vector.
        peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND);
    }

    // Remove addr records that the peer already knows about, and add new
    // addrs to the m_addr_known filter on the same pass.
    auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
        bool ret = peer.m_addr_known->contains(addr.GetKey());
        if (!ret) peer.m_addr_known->insert(addr.GetKey());
        return ret;
    };
    peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known),
                               peer.m_addrs_to_send.end());

    // No addr messages to send
    if (peer.m_addrs_to_send.empty()) return;

    // Use addrv2 (BIP 155 style) encoding only if the peer asked for it.
    if (peer.m_wants_addrv2) {
        MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(peer.m_addrs_to_send));
    } else {
        MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(peer.m_addrs_to_send));
    }
    peer.m_addrs_to_send.clear();

    // we only send the big addr message once
    if (peer.m_addrs_to_send.capacity() > 40) {
        peer.m_addrs_to_send.shrink_to_fit();
    }
}
5759
5760void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer)
5761{
5762 // Delay sending SENDHEADERS (BIP 130) until we're done with an
5763 // initial-headers-sync with this peer. Receiving headers announcements for
5764 // new blocks while trying to sync their headers chain is problematic,
5765 // because of the state tracking done.
5766 if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) {
5767 LOCK(cs_main);
5768 CNodeState &state = *State(node.GetId());
5769 if (state.pindexBestKnownBlock != nullptr &&
5770 state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) {
5771 // Tell our peer we prefer to receive headers rather than inv's
5772 // We send this to non-NODE NETWORK peers as well, because even
5773 // non-NODE NETWORK peers can announce blocks (such as pruning
5774 // nodes)
5775 MakeAndPushMessage(node, NetMsgType::SENDHEADERS);
5776 peer.m_sent_sendheaders = true;
5777 }
5778 }
5779}
5780
5781void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time)
5782{
5783 if (m_opts.ignore_incoming_txs) return;
5784 if (pto.GetCommonVersion() < FEEFILTER_VERSION) return;
5785 // peers with the forcerelay permission should not filter txs to us
5787 // Don't send feefilter messages to outbound block-relay-only peers since they should never announce
5788 // transactions to us, regardless of feefilter state.
5789 if (pto.IsBlockOnlyConn()) return;
5790
5791 CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK();
5792
5793 if (m_chainman.IsInitialBlockDownload()) {
5794 // Received tx-inv messages are discarded when the active
5795 // chainstate is in IBD, so tell the peer to not send them.
5796 currentFilter = MAX_MONEY;
5797 } else {
5798 static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
5799 if (peer.m_fee_filter_sent == MAX_FILTER) {
5800 // Send the current filter if we sent MAX_FILTER previously
5801 // and made it out of IBD.
5802 peer.m_next_send_feefilter = 0us;
5803 }
5804 }
5805 if (current_time > peer.m_next_send_feefilter) {
5806 CAmount filterToSend = m_fee_filter_rounder.round(currentFilter);
5807 // We always have a fee filter of at least the min relay fee
5808 filterToSend = std::max(filterToSend, m_mempool.m_opts.min_relay_feerate.GetFeePerK());
5809 if (filterToSend != peer.m_fee_filter_sent) {
5810 MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend);
5811 peer.m_fee_filter_sent = filterToSend;
5812 }
5813 peer.m_next_send_feefilter = current_time + m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL);
5814 }
5815 // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
5816 // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
5817 else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter &&
5818 (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
5819 peer.m_next_send_feefilter = current_time + m_rng.randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
5820 }
5821}
5822
5823namespace {
5824class CompareInvMempoolOrder
5825{
5826 CTxMemPool* mp;
5827 bool m_wtxid_relay;
5828public:
5829 explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
5830 {
5831 mp = _mempool;
5832 m_wtxid_relay = use_wtxid;
5833 }
5834
5835 bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
5836 {
5837 /* As std::make_heap produces a max-heap, we want the entries with the
5838 * fewest ancestors/highest fee to sort later. */
5839 return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
5840 }
5841};
5842} // namespace
5843
5844bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const
5845{
5846 // block-relay-only peers may never send txs to us
5847 if (peer.IsBlockOnlyConn()) return true;
5848 if (peer.IsFeelerConn()) return true;
5849 // In -blocksonly mode, peers need the 'relay' permission to send txs to us
5850 if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true;
5851 return false;
5852}
5853
5854bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
5855{
5856 // We don't participate in addr relay with outbound block-relay-only
5857 // connections to prevent providing adversaries with the additional
5858 // information of addr traffic to infer the link.
5859 if (node.IsBlockOnlyConn()) return false;
5860
5861 if (!peer.m_addr_relay_enabled.exchange(true)) {
5862 // During version message processing (non-block-relay-only outbound peers)
5863 // or on first addr-related message we have received (inbound peers), initialize
5864 // m_addr_known.
5865 peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
5866 }
5867
5868 return true;
5869}
5870
5871bool PeerManagerImpl::SendMessages(CNode* pto)
5872{
5873 AssertLockNotHeld(m_tx_download_mutex);
5874 AssertLockHeld(g_msgproc_mutex);
5875
5876 PeerRef peer = GetPeerRef(pto->GetId());
5877 if (!peer) return false;
5878 const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
5879
5880 // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
5881 // disconnect misbehaving peers even before the version handshake is complete.
5882 if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true;
5883
5884 // Initiate version handshake for outbound connections
5885 if (!pto->IsInboundConn() && !peer->m_outbound_version_message_sent) {
5886 PushNodeVersion(*pto, *peer);
5887 peer->m_outbound_version_message_sent = true;
5888 }
5889
5890 // Don't send anything until the version handshake is complete
5891 if (!pto->fSuccessfullyConnected || pto->fDisconnect)
5892 return true;
5893
5894 const auto current_time{GetTime<std::chrono::microseconds>()};
5895
5896 if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
5897 LogPrint(BCLog::NET, "addrfetch connection timeout; disconnecting peer=%d\n", pto->GetId());
5898 pto->fDisconnect = true;
5899 return true;
5900 }
5901
5902 MaybeSendPing(*pto, *peer, current_time);
5903
5904 // MaybeSendPing may have marked peer for disconnection
5905 if (pto->fDisconnect) return true;
5906
5907 MaybeSendAddr(*pto, *peer, current_time);
5908
5909 MaybeSendSendHeaders(*pto, *peer);
5910
5911 {
5912 LOCK(cs_main);
5913
5914 CNodeState &state = *State(pto->GetId());
5915
5916 // Start block sync
5917 if (m_chainman.m_best_header == nullptr) {
5918 m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
5919 }
5920
5921 // Determine whether we might try initial headers sync or parallel
5922 // block download from this peer -- this mostly affects behavior while
5923 // in IBD (once out of IBD, we sync from all peers).
5924 bool sync_blocks_and_headers_from_peer = false;
5925 if (state.fPreferredDownload) {
5926 sync_blocks_and_headers_from_peer = true;
5927 } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
5928 // Typically this is an inbound peer. If we don't have any outbound
5929 // peers, or if we aren't downloading any blocks from such peers,
5930 // then allow block downloads from this peer, too.
5931 // We prefer downloading blocks from outbound peers to avoid
5932 // putting undue load on (say) some home user who is just making
5933 // outbound connections to the network, but if our only source of
5934 // the latest blocks is from an inbound peer, we have to be sure to
5935 // eventually download it (and not just wait indefinitely for an
5936 // outbound peer to have it).
5937 if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) {
5938 sync_blocks_and_headers_from_peer = true;
5939 }
5940 }
5941
5942 if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) {
5943 // Only actively request headers from a single peer, unless we're close to today.
5944 if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h) {
5945 const CBlockIndex* pindexStart = m_chainman.m_best_header;
5946 /* If possible, start at the block preceding the currently
5947 best known header. This ensures that we always get a
5948 non-empty list of headers back as long as the peer
5949 is up-to-date. With a non-empty response, we can initialise
5950 the peer's known best block. This wouldn't be possible
5951 if we requested starting at m_chainman.m_best_header and
5952 got back an empty response. */
5953 if (pindexStart->pprev)
5954 pindexStart = pindexStart->pprev;
5955 if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
5956 LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
5957
5958 state.fSyncStarted = true;
5959 peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
5960 (
5961 // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
5962 // to maintain precision
5963 std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
5964 Ticks<std::chrono::seconds>(NodeClock::now() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing
5965 );
5966 nSyncStarted++;
5967 }
5968 }
5969 }
5970
5971 //
5972 // Try sending block announcements via headers
5973 //
5974 {
5975 // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our
5976 // list of block hashes we're relaying, and our peer wants
5977 // headers announcements, then find the first header
5978 // not yet known to our peer but would connect, and send.
5979 // If no header would connect, or if we have too many
5980 // blocks, or if the peer doesn't want headers, just
5981 // add all to the inv queue.
5982 LOCK(peer->m_block_inv_mutex);
5983 std::vector<CBlock> vHeaders;
5984 bool fRevertToInv = ((!peer->m_prefers_headers &&
5985 (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) ||
5986 peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
5987 const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
5988 ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
5989
5990 if (!fRevertToInv) {
5991 bool fFoundStartingHeader = false;
5992 // Try to find first header that our peer doesn't have, and
5993 // then send all headers past that one. If we come across any
5994 // headers that aren't on m_chainman.ActiveChain(), give up.
5995 for (const uint256& hash : peer->m_blocks_for_headers_relay) {
5996 const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
5997 assert(pindex);
5998 if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5999 // Bail out if we reorged away from this block
6000 fRevertToInv = true;
6001 break;
6002 }
6003 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
6004 // This means that the list of blocks to announce don't
6005 // connect to each other.
6006 // This shouldn't really be possible to hit during
6007 // regular operation (because reorgs should take us to
6008 // a chain that has some block not on the prior chain,
6009 // which should be caught by the prior check), but one
6010 // way this could happen is by using invalidateblock /
6011 // reconsiderblock repeatedly on the tip, causing it to
6012 // be added multiple times to m_blocks_for_headers_relay.
6013 // Robustly deal with this rare situation by reverting
6014 // to an inv.
6015 fRevertToInv = true;
6016 break;
6017 }
6018 pBestIndex = pindex;
6019 if (fFoundStartingHeader) {
6020 // add this to the headers message
6021 vHeaders.emplace_back(pindex->GetBlockHeader());
6022 } else if (PeerHasHeader(&state, pindex)) {
6023 continue; // keep looking for the first new block
6024 } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
6025 // Peer doesn't have this header but they do have the prior one.
6026 // Start sending headers.
6027 fFoundStartingHeader = true;
6028 vHeaders.emplace_back(pindex->GetBlockHeader());
6029 } else {
6030 // Peer doesn't have this header or the prior one -- nothing will
6031 // connect, so bail out.
6032 fRevertToInv = true;
6033 break;
6034 }
6035 }
6036 }
6037 if (!fRevertToInv && !vHeaders.empty()) {
6038 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
6039 // We only send up to 1 block as header-and-ids, as otherwise
6040 // probably means we're doing an initial-ish-sync or they're slow
6041 LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
6042 vHeaders.front().GetHash().ToString(), pto->GetId());
6043
6044 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
6045 {
6046 LOCK(m_most_recent_block_mutex);
6047 if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) {
6048 cached_cmpctblock_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block);
6049 }
6050 }
6051 if (cached_cmpctblock_msg.has_value()) {
6052 PushMessage(*pto, std::move(cached_cmpctblock_msg.value()));
6053 } else {
6054 CBlock block;
6055 const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pBestIndex)};
6056 assert(ret);
6057 CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()};
6058 MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock);
6059 }
6060 state.pindexBestHeaderSent = pBestIndex;
6061 } else if (peer->m_prefers_headers) {
6062 if (vHeaders.size() > 1) {
6063 LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
6064 vHeaders.size(),
6065 vHeaders.front().GetHash().ToString(),
6066 vHeaders.back().GetHash().ToString(), pto->GetId());
6067 } else {
6068 LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
6069 vHeaders.front().GetHash().ToString(), pto->GetId());
6070 }
6071 MakeAndPushMessage(*pto, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders));
6072 state.pindexBestHeaderSent = pBestIndex;
6073 } else
6074 fRevertToInv = true;
6075 }
6076 if (fRevertToInv) {
6077 // If falling back to using an inv, just try to inv the tip.
6078 // The last entry in m_blocks_for_headers_relay was our tip at some point
6079 // in the past.
6080 if (!peer->m_blocks_for_headers_relay.empty()) {
6081 const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back();
6082 const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
6083 assert(pindex);
6084
6085 // Warn if we're announcing a block that is not on the main chain.
6086 // This should be very rare and could be optimized out.
6087 // Just log for now.
6088 if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
6089 LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
6090 hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString());
6091 }
6092
6093 // If the peer's chain has this block, don't inv it back.
6094 if (!PeerHasHeader(&state, pindex)) {
6095 peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
6096 LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
6097 pto->GetId(), hashToAnnounce.ToString());
6098 }
6099 }
6100 }
6101 peer->m_blocks_for_headers_relay.clear();
6102 }
6103
6104 //
6105 // Message: inventory
6106 //
6107 std::vector<CInv> vInv;
6108 {
6109 LOCK(peer->m_block_inv_mutex);
6110 vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET));
6111
6112 // Add blocks
6113 for (const uint256& hash : peer->m_blocks_for_inv_relay) {
6114 vInv.emplace_back(MSG_BLOCK, hash);
6115 if (vInv.size() == MAX_INV_SZ) {
6116 MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
6117 vInv.clear();
6118 }
6119 }
6120 peer->m_blocks_for_inv_relay.clear();
6121 }
6122
6123 if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
6124 LOCK(tx_relay->m_tx_inventory_mutex);
6125 // Check whether periodic sends should happen
6126 bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
6127 if (tx_relay->m_next_inv_send_time < current_time) {
6128 fSendTrickle = true;
6129 if (pto->IsInboundConn()) {
6130 tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
6131 } else {
6132 tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
6133 }
6134 }
6135
6136 // Time to send but the peer has requested we not relay transactions.
6137 if (fSendTrickle) {
6138 LOCK(tx_relay->m_bloom_filter_mutex);
6139 if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear();
6140 }
6141
6142 // Respond to BIP35 mempool requests
6143 if (fSendTrickle && tx_relay->m_send_mempool) {
6144 auto vtxinfo = m_mempool.infoAll();
6145 tx_relay->m_send_mempool = false;
6146 const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
6147
6148 LOCK(tx_relay->m_bloom_filter_mutex);
6149
6150 for (const auto& txinfo : vtxinfo) {
6151 CInv inv{
6152 peer->m_wtxid_relay ? MSG_WTX : MSG_TX,
6153 peer->m_wtxid_relay ?
6154 txinfo.tx->GetWitnessHash().ToUint256() :
6155 txinfo.tx->GetHash().ToUint256(),
6156 };
6157 tx_relay->m_tx_inventory_to_send.erase(inv.hash);
6158
6159 // Don't send transactions that peers will not put into their mempool
6160 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
6161 continue;
6162 }
6163 if (tx_relay->m_bloom_filter) {
6164 if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
6165 }
6166 tx_relay->m_tx_inventory_known_filter.insert(inv.hash);
6167 vInv.push_back(inv);
6168 if (vInv.size() == MAX_INV_SZ) {
6169 MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
6170 vInv.clear();
6171 }
6172 }
6173 }
6174
6175 // Determine transactions to relay
6176 if (fSendTrickle) {
6177 // Produce a vector with all candidates for sending
6178 std::vector<std::set<uint256>::iterator> vInvTx;
6179 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
6180 for (std::set<uint256>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) {
6181 vInvTx.push_back(it);
6182 }
6183 const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
6184 // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
6185 // A heap is used so that not all items need sorting if only a few are being sent.
6186 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, peer->m_wtxid_relay);
6187 std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
6188 // No reason to drain out at many times the network's capacity,
6189 // especially since we have many peers and some will draw much shorter delays.
6190 unsigned int nRelayedTransactions = 0;
6191 LOCK(tx_relay->m_bloom_filter_mutex);
6192 size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5};
6193 broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max);
6194 while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) {
6195 // Fetch the top element from the heap
6196 std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
6197 std::set<uint256>::iterator it = vInvTx.back();
6198 vInvTx.pop_back();
6199 uint256 hash = *it;
6200 CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
6201 // Remove it from the to-be-sent set
6202 tx_relay->m_tx_inventory_to_send.erase(it);
6203 // Check if not in the filter already
6204 if (tx_relay->m_tx_inventory_known_filter.contains(hash)) {
6205 continue;
6206 }
6207 // Not in the mempool anymore? don't bother sending it.
6208 auto txinfo = m_mempool.info(ToGenTxid(inv));
6209 if (!txinfo.tx) {
6210 continue;
6211 }
6212 // Peer told you to not send transactions at that feerate? Don't bother sending it.
6213 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
6214 continue;
6215 }
6216 if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
6217 // Send
6218 vInv.push_back(inv);
6219 nRelayedTransactions++;
6220 if (vInv.size() == MAX_INV_SZ) {
6221 MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
6222 vInv.clear();
6223 }
6224 tx_relay->m_tx_inventory_known_filter.insert(hash);
6225 }
6226
6227 // Ensure we'll respond to GETDATA requests for anything we've just announced
6228 LOCK(m_mempool.cs);
6229 tx_relay->m_last_inv_sequence = m_mempool.GetSequence();
6230 }
6231 }
6232 if (!vInv.empty())
6233 MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
6234
6235 // Detect whether we're stalling
6236 auto stalling_timeout = m_block_stalling_timeout.load();
6237 if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
6238 // Stalling only triggers when the block download window cannot move. During normal steady state,
6239 // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
6240 // should only happen during initial block download.
6241 LogPrintf("Peer=%d%s is stalling block download, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
6242 pto->fDisconnect = true;
6243 // Increase timeout for the next peer so that we don't disconnect multiple peers if our own
6244 // bandwidth is insufficient.
6245 const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
6246 if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
6247 LogPrint(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout));
6248 }
6249 return true;
6250 }
6251 // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
6252 // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
6253 // We compensate for other peers to prevent killing off peers due to our own downstream link
6254 // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
6255 // to unreasonably increase our timeout.
6256 if (state.vBlocksInFlight.size() > 0) {
6257 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
6258 int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
6259 if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
6260 LogPrintf("Timeout downloading block %s from peer=%d%s, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
6261 pto->fDisconnect = true;
6262 return true;
6263 }
6264 }
6265 // Check for headers sync timeouts
6266 if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
6267 // Detect whether this is a stalling initial-headers-sync peer
6268 if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) {
6269 if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
6270 // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
6271 // and we have others we could be using instead.
6272 // Note: If all our peers are inbound, then we won't
6273 // disconnect our sync peer for stalling; we have bigger
6274 // problems if we can't get any outbound peers.
6276 LogPrintf("Timeout downloading headers from peer=%d%s, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
6277 pto->fDisconnect = true;
6278 return true;
6279 } else {
6280 LogPrintf("Timeout downloading headers from noban peer=%d%s, not disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
6281 // Reset the headers sync state so that we have a
6282 // chance to try downloading from a different peer.
6283 // Note: this will also result in at least one more
6284 // getheaders message to be sent to
6285 // this peer (eventually).
6286 state.fSyncStarted = false;
6287 nSyncStarted--;
6288 peer->m_headers_sync_timeout = 0us;
6289 }
6290 }
6291 } else {
6292 // After we've caught up once, reset the timeout so we can't trigger
6293 // disconnect later.
6294 peer->m_headers_sync_timeout = std::chrono::microseconds::max();
6295 }
6296 }
6297
6298 // Check that outbound peers have reasonable chains
6299 // GetTime() is used by this anti-DoS logic so we can test this using mocktime
6300 ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
6301
6302 //
6303 // Message: getdata (blocks)
6304 //
6305 std::vector<CInv> vGetData;
6306 if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
6307 std::vector<const CBlockIndex*> vToDownload;
6308 NodeId staller = -1;
6309 auto get_inflight_budget = [&state]() {
6310 return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size()));
6311 };
6312
6313 // If a snapshot chainstate is in use, we want to find its next blocks
6314 // before the background chainstate to prioritize getting to network tip.
6315 FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller);
6316 if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) {
6317 // If the background tip is not an ancestor of the snapshot block,
6318 // we need to start requesting blocks from their last common ancestor.
6319 const CBlockIndex *from_tip = LastCommonAncestor(m_chainman.GetBackgroundSyncTip(), m_chainman.GetSnapshotBaseBlock());
6320 TryDownloadingHistoricalBlocks(
6321 *peer,
6322 get_inflight_budget(),
6323 vToDownload, from_tip,
6324 Assert(m_chainman.GetSnapshotBaseBlock()));
6325 }
6326 for (const CBlockIndex *pindex : vToDownload) {
6327 uint32_t nFetchFlags = GetFetchFlags(*peer);
6328 vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash());
6329 BlockRequested(pto->GetId(), *pindex);
6330 LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
6331 pindex->nHeight, pto->GetId());
6332 }
6333 if (state.vBlocksInFlight.empty() && staller != -1) {
6334 if (State(staller)->m_stalling_since == 0us) {
6335 State(staller)->m_stalling_since = current_time;
6336 LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
6337 }
6338 }
6339 }
6340
6341 //
6342 // Message: getdata (transactions)
6343 //
6344 {
6345 LOCK(m_tx_download_mutex);
6346 std::vector<std::pair<NodeId, GenTxid>> expired;
6347 auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
6348 for (const auto& entry : expired) {
6349 LogPrint(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx",
6350 entry.second.GetHash().ToString(), entry.first);
6351 }
6352 for (const GenTxid& gtxid : requestable) {
6353 // Exclude m_lazy_recent_rejects_reconsiderable: we may be requesting a missing parent
6354 // that was previously rejected for being too low feerate.
6355 if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) {
6356 LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
6357 gtxid.GetHash().ToString(), pto->GetId());
6358 vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash());
6359 if (vGetData.size() >= MAX_GETDATA_SZ) {
6360 MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
6361 vGetData.clear();
6362 }
6363 m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL);
6364 } else {
6365 // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as
6366 // this should already be called whenever a transaction becomes AlreadyHaveTx().
6367 m_txrequest.ForgetTxHash(gtxid.GetHash());
6368 }
6369 }
6370 } // release m_tx_download_mutex
6371
6372 if (!vGetData.empty())
6373 MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
6374 } // release cs_main
6375 MaybeSendFeefilter(*pto, *peer, current_time);
6376 return true;
6377}
static constexpr CAmount MAX_MONEY
No amount larger than this (in satoshi) is valid.
Definition amount.h:26
bool MoneyRange(const CAmount &nValue)
Definition amount.h:27
int64_t CAmount
Amount in satoshis (Can be negative)
Definition amount.h:12
int ret
ArgsManager & args
Definition bitcoind.cpp:270
@ READ_STATUS_OK
@ READ_STATUS_INVALID
@ READ_STATUS_FAILED
enum ReadStatus_t ReadStatus
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
BlockFilterType
Definition blockfilter.h:93
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
arith_uint256 GetBlockProof(const CBlockIndex &block)
Definition chain.cpp:131
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
Definition chain.cpp:50
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition chain.cpp:146
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition chain.cpp:165
@ BLOCK_VALID_CHAIN
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends,...
Definition chain.h:111
@ BLOCK_VALID_TRANSACTIONS
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid,...
Definition chain.h:107
@ BLOCK_VALID_SCRIPTS
Scripts & signatures ok.
Definition chain.h:115
@ BLOCK_VALID_TREE
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition chain.h:97
@ BLOCK_HAVE_DATA
full block available in blk*.dat
Definition chain.h:121
#define Assert(val)
Identity function.
Definition check.h:77
#define Assume(val)
Assume is the identity function.
Definition check.h:89
Stochastic address manager.
Definition addrman.h:88
void Connected(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
We have successfully connected to this peer.
Definition addrman.cpp:1333
bool Good(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
Mark an address record as accessible and attempt to move it to addrman's tried table.
Definition addrman.cpp:1298
bool Add(const std::vector< CAddress > &vAddr, const CNetAddr &source, std::chrono::seconds time_penalty=0s)
Attempt to add one or more addresses to addrman's new table.
Definition addrman.cpp:1293
void SetServices(const CService &addr, ServiceFlags nServices)
Update an entry's service bits.
Definition addrman.cpp:1338
bool IsBanned(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Return whether net_addr is banned.
Definition banman.cpp:89
bool IsDiscouraged(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Return whether net_addr is discouraged.
Definition banman.cpp:83
void Discourage(const CNetAddr &net_addr) EXCLUSIVE_LOCKS_REQUIRED(!m_banned_mutex)
Definition banman.cpp:124
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
std::vector< CTransactionRef > txn
std::vector< uint16_t > indexes
A CService with information about it as peer.
Definition protocol.h:367
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
Definition protocol.h:459
static constexpr SerParams V1_NETWORK
Definition protocol.h:408
NodeSeconds nTime
Always included in serialization. The behavior is unspecified if the value is not representable as ui...
Definition protocol.h:457
static constexpr SerParams V2_NETWORK
Definition protocol.h:409
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition block.h:22
uint256 hashPrevBlock
Definition block.h:26
uint256 GetHash() const
Definition block.cpp:11
Definition block.h:69
std::vector< CTransactionRef > vtx
Definition block.h:72
The block chain is a tree shaped structure starting with the genesis block at the root,...
Definition chain.h:141
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition chain.h:147
CBlockHeader GetBlockHeader() const
Definition chain.h:230
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition chain.h:165
bool HaveNumChainTxs() const
Check whether this block and all previous blocks back to the genesis block or an assumeutxo snapshot ...
Definition chain.h:259
uint256 GetBlockHash() const
Definition chain.h:243
int64_t GetBlockTime() const
Definition chain.h:266
unsigned int nTx
Number of transactions in this block.
Definition chain.h:170
bool IsValid(enum BlockStatus nUpTo=BLOCK_VALID_TRANSACTIONS) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
Definition chain.h:295
NodeSeconds Time() const
Definition chain.h:261
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
Definition chain.cpp:120
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition chain.h:153
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition chain.h:208
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition bloom.h:45
bool IsWithinSizeConstraints() const
True if the size is <= MAX_BLOOM_FILTER_SIZE and the number of hash functions is <= MAX_HASH_FUNCS (c...
Definition bloom.cpp:89
An in-memory indexed chain of blocks.
Definition chain.h:417
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
Definition chain.h:433
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition chain.h:453
int Height() const
Return the maximal height in the chain.
Definition chain.h:462
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition chain.h:447
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system.
Definition chainparams.h:81
const Consensus::Params & GetConsensus() const
Definition chainparams.h:93
void ForEachNode(const NodeFn &func)
Definition net.h:1132
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
Definition net.cpp:3855
bool GetNetworkActive() const
Definition net.h:1117
bool GetTryNewOutboundPeer() const
Definition net.cpp:2353
void SetTryNewOutboundPeer(bool flag)
Definition net.cpp:2358
int GetExtraBlockRelayCount() const
Definition net.cpp:2403
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc)
Definition net.cpp:2173
bool OutboundTargetReached(bool historicalBlockServingLimit) const EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex)
check if the outbound target is reached if param historicalBlockServingLimit is set true,...
Definition net.cpp:3674
void StartExtraBlockRelayPeers()
Definition net.cpp:2364
bool DisconnectNode(const std::string &node)
Definition net.cpp:3574
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
Definition net.cpp:3868
uint32_t GetMappedAS(const CNetAddr &addr) const
Definition net.cpp:3557
int GetExtraFullOutboundCount() const
Definition net.cpp:2389
std::vector< CAddress > GetAddresses(size_t max_addresses, size_t max_pct, std::optional< Network > network, const bool filtered=true) const
Return all or many randomly selected addresses, optionally by network.
Definition net.cpp:3441
bool CheckIncomingNonce(uint64_t nonce)
Definition net.cpp:362
bool ShouldRunInactivityChecks(const CNode &node, std::chrono::seconds now) const
Return true if we should disconnect the peer for failing an inactivity check.
Definition net.cpp:1952
bool GetUseAddrmanOutgoing() const
Definition net.h:1118
RecursiveMutex & GetNodesMutex() const LOCK_RETURNED(m_nodes_mutex)
Fee rate in satoshis per kilovirtualbyte: CAmount / kvB.
Definition feerate.h:33
CAmount GetFeePerK() const
Return the fee in satoshis for a vsize of 1000 vbytes.
Definition feerate.h:63
inv message data
Definition protocol.h:494
bool IsMsgCmpctBlk() const
Definition protocol.h:511
bool IsMsgBlk() const
Definition protocol.h:508
std::string ToString() const
Definition protocol.cpp:77
bool IsMsgWtx() const
Definition protocol.h:509
bool IsGenTxMsg() const
Definition protocol.h:515
bool IsMsgTx() const
Definition protocol.h:507
bool IsMsgFilteredBlk() const
Definition protocol.h:510
uint256 hash
Definition protocol.h:525
bool IsGenBlkMsg() const
Definition protocol.h:519
bool IsMsgWitnessBlk() const
Definition protocol.h:512
Used to relay blocks as header + vector<merkle branch> to filtered nodes.
std::vector< std::pair< unsigned int, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
Definition netaddress.h:218
bool IsRoutable() const
static constexpr SerParams V1
Definition netaddress.h:231
bool IsValid() const
bool IsLocal() const
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Transport protocol agnostic message container.
Definition net.h:231
Information about a peer.
Definition net.h:670
bool IsFeelerConn() const
Definition net.h:800
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
Definition net.h:703
bool ExpectServicesFromConn() const
Definition net.h:812
std::atomic< int > nVersion
Definition net.h:713
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
Definition net.h:848
bool IsInboundConn() const
Definition net.h:808
bool HasPermission(NetPermissionFlags permission) const
Definition net.h:721
bool IsOutboundOrBlockRelayConn() const
Definition net.h:757
NodeId GetId() const
Definition net.h:891
bool IsManualConn() const
Definition net.h:776
const std::string m_addr_name
Definition net.h:708
std::string ConnectionTypeAsString() const
Definition net.h:945
void SetCommonVersion(int greatest_common_version)
Definition net.h:916
std::atomic< bool > m_bip152_highbandwidth_to
Definition net.h:843
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer.
Definition net.h:852
std::atomic< bool > m_bip152_highbandwidth_from
Definition net.h:845
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
Definition net.h:948
std::atomic_bool fSuccessfullyConnected
fSuccessfullyConnected is set to true on receiving VERACK from the peer.
Definition net.h:725
bool IsAddrFetchConn() const
Definition net.h:804
uint64_t GetLocalNonce() const
Definition net.h:895
const CAddress addr
Definition net.h:705
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
Definition net.cpp:582
bool IsBlockOnlyConn() const
Definition net.h:796
int GetCommonVersion() const
Definition net.h:921
bool IsFullOutboundConn() const
Definition net.h:772
Mutex m_subver_mutex
Definition net.h:714
std::atomic_bool fPauseSend
Definition net.h:734
std::optional< std::pair< CNetMessage, bool > > PollMessage() EXCLUSIVE_LOCKS_REQUIRED(!m_msg_process_queue_mutex)
Poll the next message from the processing queue of this connection.
Definition net.cpp:3789
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
Definition net.h:856
const std::unique_ptr< Transport > m_transport
Transport serializer/deserializer.
Definition net.h:674
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e. connected via our Tor onion service.
Definition net.h:712
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e....
Definition net.h:863
std::atomic_bool fDisconnect
Definition net.h:728
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e....
Definition net.h:869
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
Definition bloom.h:109
Simple class for background tasks that should be run periodically or once "after a while".
Definition scheduler.h:40
void scheduleEvery(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat f until the scheduler is stopped.
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
Definition scheduler.h:53
A combination of a network address (CNetAddr) and a (TCP) port.
Definition netaddress.h:531
std::string ToStringAddrPort() const
std::vector< unsigned char > GetKey() const
SipHash-2-4.
Definition siphash.h:15
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
Definition siphash.cpp:77
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data It is treated as if this was the little-endian interpretation of ...
Definition siphash.cpp:28
The basic transaction that is broadcasted on the network and contained in blocks.
bool HasWitness() const
const Wtxid & GetWitnessHash() const LIFETIMEBOUND
const Txid & GetHash() const LIFETIMEBOUND
const std::vector< CTxIn > vin
An input of a transaction.
Definition transaction.h:67
COutPoint prevout
Definition transaction.h:69
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition txmempool.h:304
void RemoveUnbroadcastTx(const uint256 &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
std::set< uint256 > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
Definition txmempool.h:705
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it.
Definition txmempool.h:390
CFeeRate GetMinFee(size_t sizelimit) const
CTransactionRef get(const uint256 &hash) const
size_t DynamicMemoryUsage() const
const Options m_opts
Definition txmempool.h:439
std::vector< TxMempoolInfo > infoAll() const
TxMempoolInfo info(const GenTxid &gtxid) const
uint64_t GetSequence() const EXCLUSIVE_LOCKS_REQUIRED(cs)
Definition txmempool.h:723
TxMempoolInfo info_for_relay(const GenTxid &gtxid, uint64_t last_sequence) const
Returns info for a transaction if its entry_sequence < last_sequence.
bool exists(const GenTxid &gtxid) const
Definition txmempool.h:665
unsigned long size() const
Definition txmempool.h:647
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition validation.h:871
SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
const CBlockIndex * GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
Chainstate & ActiveChainstate() const
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
const CBlockIndex * GetBackgroundSyncTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The tip of the background sync chain.
MempoolAcceptResult ProcessTransaction(const CTransactionRef &tx, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Try to add a transaction to the memory pool.
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network)
RecursiveMutex & GetMutex() const LOCK_RETURNED(::cs_main)
Alias for cs_main.
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
bool ProcessNewBlock(const std::shared_ptr< const CBlock > &block, bool force_processing, bool min_pow_checked, bool *new_block) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
bool BackgroundSyncInProgress() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The state of a background sync (for net processing)
const arith_uint256 & MinimumChainWork() const
Definition validation.h:984
CChain & ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
void ReportHeadersPresync(const arith_uint256 &work, int64_t height, int64_t timestamp)
This is used by net_processing to report pre-synchronization progress of headers, as headers are not ...
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &block, bool min_pow_checked, BlockValidationState &state, const CBlockIndex **ppindex=nullptr) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
Double ended buffer combining vector and stream-like interfaces.
Definition streams.h:147
bool empty() const
Definition streams.h:182
size_type size() const
Definition streams.h:181
void ignore(size_t num_ignore)
Definition streams.h:236
int in_avail() const
Definition streams.h:216
Fast randomness source.
Definition random.h:377
uint64_t rand64() noexcept
Generate a random 64-bit integer.
Definition random.h:395
A generic txid reference (txid or wtxid).
bool IsWtxid() const
static GenTxid Wtxid(const uint256 &hash)
const uint256 & GetHash() const LIFETIMEBOUND
static GenTxid Txid(const uint256 &hash)
HeadersSyncState:
@ FINAL
We're done syncing with this peer and can discard any remaining state.
@ PRESYNC
PRESYNC means the peer has not yet demonstrated their chain has sufficient work and we're only buildi...
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
Definition net.h:992
static bool HasFlag(NetPermissionFlags flags, NetPermissionFlags f)
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< CTransactionRef > &extra_txn)
bool IsTxAvailable(size_t index) const
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, node::Warnings &warnings, Options opts)
I randrange(I range) noexcept
Generate a random integer in the range [0..range), with range > 0.
Definition random.h:254
bool Contains(Network net) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Definition netbase.h:124
A Span is an object that can refer to a contiguous sequence of objects.
Definition span.h:98
void Add(std::chrono::seconds offset) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add a new time offset sample.
bool WarnIfOutOfSync() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Raise warnings if the median time offset exceeds the warnings threshold.
std::chrono::seconds Median() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Compute and return the median of the collected time offset samples.
A class to track orphan transactions (failed on TX_MISSING_INPUTS) Since we cannot distinguish orphan...
Definition txorphanage.h:28
Data structure to keep track of, and schedule, transaction downloads from peers.
Definition txrequest.h:96
bool IsValid() const
Definition validation.h:122
Result GetResult() const
Definition validation.h:125
std::string ToString() const
Definition validation.h:128
bool IsInvalid() const
Definition validation.h:123
256-bit unsigned big integer.
constexpr bool IsNull() const
Definition uint256.h:46
std::string ToString() const
Definition uint256.cpp:47
constexpr void SetNull()
Definition uint256.h:53
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos) const
Functions for disk access for blocks.
CBlockIndex * LookupBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool ReadRawBlockFromDisk(std::vector< uint8_t > &block, const FlatFilePos &pos) const
bool LoadingBlocks() const
bool IsPruneMode() const
Whether running in -prune mode.
Manages warning messages within a node.
Definition warnings.h:40
const uint256 & ToUint256() const LIFETIMEBOUND
static transaction_identifier FromUint256(const uint256 &id)
256-bit opaque blob.
Definition uint256.h:178
std::string TransportTypeAsString(TransportProtocolType transport_type)
Convert TransportProtocolType enum to a string value.
@ BLOCK_CHECKPOINT
the block failed to meet one of our checkpoints
@ BLOCK_RECENT_CONSENSUS_CHANGE
Invalid by a change to consensus rules more recent than SegWit.
@ BLOCK_HEADER_LOW_WORK
the block header may be on a too-little-work chain
@ BLOCK_INVALID_HEADER
invalid proof of work or time too old
@ BLOCK_CACHED_INVALID
this block was cached as being invalid and we didn't store the reason why
@ BLOCK_CONSENSUS
invalid by consensus rules (excluding any below reasons)
@ BLOCK_MISSING_PREV
We don't have the previous block the checked one is built on.
@ BLOCK_INVALID_PREV
A block this one builds on is invalid.
@ BLOCK_MUTATED
the block's data didn't match the data committed to by the PoW
@ BLOCK_TIME_FUTURE
block timestamp was > 2 hours in the future (or our clock is bad)
@ BLOCK_RESULT_UNSET
initial value. Block has not yet been rejected
@ TX_MISSING_INPUTS
transaction was missing some of its inputs
@ TX_MEMPOOL_POLICY
violated mempool's fee/size/descendant/RBF/etc limits
@ TX_UNKNOWN
transaction was not validated because package failed
@ TX_PREMATURE_SPEND
transaction spends a coinbase too early, or violates locktime/sequence locks
@ TX_INPUTS_NOT_STANDARD
inputs (covered by txid) failed policy rules
@ TX_WITNESS_STRIPPED
Transaction is missing a witness.
@ TX_CONFLICT
Tx already in mempool or conflicts with a tx in the chain (if it conflicts with another tx in mempool...
@ TX_RECENT_CONSENSUS_CHANGE
Invalid by a change to consensus rules more recent than SegWit.
@ TX_NOT_STANDARD
otherwise didn't meet our local policy rules
@ TX_WITNESS_MUTATED
Transaction might have a witness prior to SegWit activation, or witness may have been malleated (whic...
@ TX_NO_MEMPOOL
this node does not have a mempool so can't validate the transaction
@ TX_RESULT_UNSET
initial value. Tx has not yet been rejected
@ TX_CONSENSUS
invalid by consensus rules
@ TX_RECONSIDERABLE
fails some policy, but might be acceptable if submitted in a (different) package
static size_t RecursiveDynamicUsage(const CScript &script)
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
Definition cs_main.cpp:8
bool DeploymentActiveAfter(const CBlockIndex *pindexPrev, const Consensus::Params &params, Consensus::BuriedDeployment dep, VersionBitsCache &versionbitscache)
Determine if a deployment is active for the next block.
bool DeploymentActiveAt(const CBlockIndex &index, const Consensus::Params &params, Consensus::BuriedDeployment dep, VersionBitsCache &versionbitscache)
Determine if a deployment is active for this block.
ChainstateRole
This enum describes the various roles a specific Chainstate instance can take.
Definition chain.h:25
bool fLogIPs
Definition logging.cpp:44
#define LogPrintLevel(category, level,...)
Definition logging.h:281
#define LogPrint(category,...)
Definition logging.h:293
#define LogInfo(...)
Definition logging.h:269
#define LogError(...)
Definition logging.h:271
static bool LogAcceptCategory(BCLog::LogFlags category, BCLog::Level level)
Return true if log accepts specified category, at the specified level.
Definition logging.h:239
#define LogDebug(category,...)
Definition logging.h:289
#define LogPrintf(...)
Definition logging.h:274
unsigned int nonce
@ TXPACKAGES
Definition logging.h:72
@ MEMPOOLREJ
Definition logging.h:58
@ MEMPOOL
Definition logging.h:44
@ NET
Definition logging.h:42
@ DEPLOYMENT_SEGWIT
Definition params.h:28
CSerializedNetMsg Make(std::string msg_type, Args &&... args)
constexpr const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter.
Definition protocol.h:180
constexpr const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition protocol.h:192
constexpr const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition protocol.h:186
constexpr const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition protocol.h:107
constexpr const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition protocol.h:123
constexpr const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition protocol.h:75
constexpr const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
Definition protocol.h:212
constexpr const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids".
Definition protocol.h:206
constexpr const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
Definition protocol.h:254
constexpr const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
Definition protocol.h:87
constexpr const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition protocol.h:132
constexpr const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition protocol.h:224
constexpr const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition protocol.h:150
constexpr const char * BLOCKTXN
Contains a BlockTransactions.
Definition protocol.h:218
constexpr const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition protocol.h:242
constexpr const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected.
Definition protocol.h:144
constexpr const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition protocol.h:164
constexpr const char * SENDTXRCNCL
Contains a 4-byte version number and an 8-byte salt.
Definition protocol.h:266
constexpr const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message,...
Definition protocol.h:81
constexpr const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition protocol.h:70
constexpr const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition protocol.h:113
constexpr const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition protocol.h:172
constexpr const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition protocol.h:229
constexpr const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition protocol.h:96
constexpr const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition protocol.h:200
constexpr const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition protocol.h:249
constexpr const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition protocol.h:92
constexpr const char * TX
The tx message transmits a single transaction.
Definition protocol.h:117
constexpr const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition protocol.h:139
constexpr const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition protocol.h:156
constexpr const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition protocol.h:102
constexpr const char * WTXIDRELAY
Indicates that a node prefers to relay transactions via wtxid, rather than txid.
Definition protocol.h:260
constexpr const char * BLOCK
The block message transmits a single serialized block.
Definition protocol.h:127
constexpr const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks,...
Definition protocol.h:237
constexpr const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition protocol.h:65
Functions to serialize / deserialize common bitcoin types.
bool fListen
Definition net.cpp:114
std::string strSubVersion
Subversion as sent to the P2P network in version messages.
Definition net.cpp:117
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
Definition net.cpp:235
std::function< void(const CAddress &addr, const std::string &msg_type, Span< const unsigned char > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
Definition net.cpp:3956
bool SeenLocal(const CService &addr)
vote for a local address
Definition net.cpp:307
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
Definition net.h:65
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
Definition net.h:57
int64_t NodeId
Definition net.h:97
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Average delay between feefilter broadcasts in seconds.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch?...
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay.
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict.
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for outbound peers.
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT
Maximum number of in-flight transaction requests from a peer.
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
static constexpr auto OVERLOADED_PEER_TX_DELAY
How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT).
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/ behind headers chain.
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
static constexpr unsigned int INVENTORY_BROADCAST_TARGET
Target number of tx inventory items to send per transmission.
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS
Maximum number of transactions to consider for requesting, per peer.
static constexpr auto TXID_RELAY_DELAY
How long to delay requesting transactions via txids, if we have wtxid-relaying peers.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we're willing to process on average.
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
static constexpr auto GETDATA_TX_INTERVAL
How long to wait before downloading a transaction from an additional peer.
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS
Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers.
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr unsigned int INVENTORY_BROADCAST_MAX
Maximum number of inventory items to send per transmission.
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message.
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static constexpr auto NONPREF_PEER_TX_DELAY
How long to delay requesting transactions from non-preferred peers.
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK
Maximum number of outstanding CMPCTBLOCK requests for the same block.
ReachableNets g_reachable_nets
Definition netbase.cpp:43
bool IsProxy(const CNetAddr &addr)
Definition netbase.cpp:718
uint256 GetPackageHash(const std::vector< CTransactionRef > &transactions)
Get the hash of these transactions' wtxids, concatenated in lexicographical order (treating the wtxid...
Definition packages.cpp:151
std::vector< CTransactionRef > Package
A package is an ordered list of transactions.
Definition packages.h:50
static constexpr unsigned int DEFAULT_MIN_RELAY_TX_FEE
Default for -minrelaytxfee, minimum relay fee for transactions.
Definition policy.h:57
static constexpr TransactionSerParams TX_NO_WITNESS
static constexpr TransactionSerParams TX_WITH_WITNESS
std::shared_ptr< const CTransaction > CTransactionRef
GenTxid ToGenTxid(const CInv &inv)
Convert a TX/WITNESS_TX/WTX CInv to a GenTxid.
Definition protocol.cpp:121
const uint32_t MSG_WITNESS_FLAG
getdata message type flags
Definition protocol.h:470
@ MSG_TX
Definition protocol.h:479
@ MSG_WTX
Defined in BIP 339.
Definition protocol.h:481
@ MSG_BLOCK
Definition protocol.h:480
@ MSG_CMPCT_BLOCK
Defined in BIP152.
Definition protocol.h:484
@ MSG_WITNESS_BLOCK
Defined in BIP144.
Definition protocol.h:485
ServiceFlags
nServices flags
Definition protocol.h:309
@ NODE_NONE
Definition protocol.h:312
@ NODE_WITNESS
Definition protocol.h:320
@ NODE_NETWORK_LIMITED
Definition protocol.h:327
@ NODE_BLOOM
Definition protocol.h:317
@ NODE_NETWORK
Definition protocol.h:315
@ NODE_COMPACT_FILTERS
Definition protocol.h:323
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB.
Definition protocol.h:360
static const int WTXID_RELAY_VERSION
"wtxidrelay" command for wtxid-based relay starts with this version
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version
static const int PROTOCOL_VERSION
network protocol versioning
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition script.h:27
#define LIMITED_STRING(obj, n)
Definition serialize.h:500
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
Definition serialize.h:337
static Wrapper< Formatter, T & > Using(T &&t)
Cause serialization/deserialization of an object to be done using a specified formatter class.
Definition serialize.h:495
constexpr auto MakeUCharSpan(V &&v) -> decltype(UCharSpanCast(Span{std::forward< V >(v)}))
Like the Span constructor, but for (const) unsigned char member types only.
Definition span.h:304
std::vector< Byte > ParseHex(std::string_view hex_str)
Like TryParseHex, but returns an empty vector on invalid input.
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
Definition block.h:124
std::vector< uint256 > vHave
Definition block.h:134
bool IsNull() const
Definition block.h:152
std::chrono::microseconds m_ping_wait
std::vector< int > vHeightInFlight
CAmount m_fee_filter_received
std::chrono::seconds time_offset
uint64_t m_addr_rate_limited
uint64_t m_addr_processed
ServiceFlags their_services
Parameters that influence chain consensus.
Definition params.h:74
int64_t nPowTargetSpacing
Definition params.h:117
std::chrono::seconds PowTargetSpacing() const
Definition params.h:119
Validation result for a transaction evaluated by MemPoolAccept (single or package).
Definition validation.h:128
const ResultType m_result_type
Result type.
Definition validation.h:137
const TxValidationState m_state
Contains information about why the transaction failed.
Definition validation.h:140
const std::list< CTransactionRef > m_replaced_transactions
Mempool transactions replaced by the tx.
Definition validation.h:143
static time_point now() noexcept
Return current system time or mocked time, if set.
Definition time.cpp:21
std::chrono::time_point< NodeClock > time_point
Definition time.h:17
Validation result for package mempool acceptance.
Definition validation.h:234
PackageValidationState m_state
Definition validation.h:235
std::map< uint256, MempoolAcceptResult > m_tx_results
Map from wtxid to finished MempoolAcceptResults.
Definition validation.h:242
CFeeRate min_relay_feerate
A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation)
#define AssertLockNotHeld(cs)
Definition sync.h:147
#define LOCK2(cs1, cs2)
Definition sync.h:258
#define LOCK(cs)
Definition sync.h:257
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition sync.h:301
#define AssertLockHeld(cs)
Definition sync.h:142
static int count
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#define GUARDED_BY(x)
#define LOCKS_EXCLUDED(...)
#define ACQUIRED_BEFORE(...)
#define PT_GUARDED_BY(x)
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
Definition time.cpp:44
T Now()
Return the current time point cast to the given precision.
Definition time.h:91
constexpr int64_t count_microseconds(std::chrono::microseconds t)
Definition time.h:56
constexpr int64_t count_seconds(std::chrono::seconds t)
Definition time.h:54
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
Definition time.h:23
constexpr auto Ticks(Dur2 d)
Helper to count the seconds of a duration/time_point.
Definition time.h:45
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
#define TRACE6(context, event, a, b, c, d, e, f)
Definition trace.h:36
static TxMempoolInfo GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it)
ReconciliationRegisterResult
static constexpr uint32_t TXRECONCILIATION_VERSION
Supported transaction reconciliation protocol version.
std::string SanitizeString(std::string_view str, int rule)
Remove unsafe chars.
PackageMempoolAcceptResult ProcessNewPackage(Chainstate &active_chainstate, CTxMemPool &pool, const Package &package, bool test_accept, const std::optional< CFeeRate > &client_maxfeerate)
Validate (and maybe submit) a package to the mempool.
bool IsBlockMutated(const CBlock &block, bool check_witness_root)
Check if a block has been mutated (with respect to its merkle root and witness commitments).
bool HasValidProofOfWork(const std::vector< CBlockHeader > &headers, const Consensus::Params &consensusParams)
Check with the proof of work on each blockheader matches the value in nBits.
arith_uint256 CalculateClaimedHeadersWork(const std::vector< CBlockHeader > &headers)
Return the sum of the claimed work on a given set of headers.
assert(!tx.IsCoinBase())
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...
Definition validation.h:68