// Copyright (c) 2011-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <node/blockstorage.h>

#include <arith_uint256.h>
#include <chain.h>
#include <consensus/params.h>
#include <consensus/validation.h>
#include <dbwrapper.h>
#include <flatfile.h>
#include <hash.h>
#include <kernel/blockmanager_opts.h>
#include <kernel/chainparams.h>
#include <kernel/messagestartchars.h>
#include <kernel/notifications_interface.h>
#include <logging.h>
#include <pow.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <random.h>
#include <serialize.h>
#include <signet.h>
#include <span.h>
#include <streams.h>
#include <sync.h>
#include <tinyformat.h>
#include <uint256.h>
#include <undo.h>
#include <util/batchpriority.h>
#include <util/check.h>
#include <util/fs.h>
#include <util/signalinterrupt.h>
#include <util/strencodings.h>
#include <util/translation.h>
#include <validation.h>

#include <map>
#include <ranges>
#include <unordered_map>

43namespace kernel {
44static constexpr uint8_t DB_BLOCK_FILES{'f'};
45static constexpr uint8_t DB_BLOCK_INDEX{'b'};
46static constexpr uint8_t DB_FLAG{'F'};
47static constexpr uint8_t DB_REINDEX_FLAG{'R'};
48static constexpr uint8_t DB_LAST_BLOCK{'l'};
49// Keys used in previous version that might still be found in the DB:
50// BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
51// BlockTreeDB::DB_TXINDEX{'t'}
52// BlockTreeDB::ReadFlag("txindex")
53
55{
56 return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
57}
58
59bool BlockTreeDB::WriteReindexing(bool fReindexing)
60{
61 if (fReindexing) {
62 return Write(DB_REINDEX_FLAG, uint8_t{'1'});
63 } else {
64 return Erase(DB_REINDEX_FLAG);
65 }
66}
67
68void BlockTreeDB::ReadReindexing(bool& fReindexing)
69{
70 fReindexing = Exists(DB_REINDEX_FLAG);
71}
72
74{
75 return Read(DB_LAST_BLOCK, nFile);
76}
77
78bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
79{
80 CDBBatch batch(*this);
81 for (const auto& [file, info] : fileInfo) {
82 batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
83 }
84 batch.Write(DB_LAST_BLOCK, nLastFile);
85 for (const CBlockIndex* bi : blockinfo) {
86 batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
87 }
88 return WriteBatch(batch, true);
89}
90
91bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
92{
93 return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
94}
95
96bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
97{
98 uint8_t ch;
99 if (!Read(std::make_pair(DB_FLAG, name), ch)) {
100 return false;
101 }
102 fValue = ch == uint8_t{'1'};
103 return true;
104}
105
106bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
107{
109 std::unique_ptr<CDBIterator> pcursor(NewIterator());
110 pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
111
112 // Load m_block_index
113 while (pcursor->Valid()) {
114 if (interrupt) return false;
115 std::pair<uint8_t, uint256> key;
116 if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
117 CDiskBlockIndex diskindex;
118 if (pcursor->GetValue(diskindex)) {
119 // Construct block index object
120 CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
121 pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
122 pindexNew->nHeight = diskindex.nHeight;
123 pindexNew->nFile = diskindex.nFile;
124 pindexNew->nDataPos = diskindex.nDataPos;
125 pindexNew->nUndoPos = diskindex.nUndoPos;
126 pindexNew->nVersion = diskindex.nVersion;
127 pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
128 pindexNew->nTime = diskindex.nTime;
129 pindexNew->nBits = diskindex.nBits;
130 pindexNew->nNonce = diskindex.nNonce;
131 pindexNew->nStatus = diskindex.nStatus;
132 pindexNew->nTx = diskindex.nTx;
133
134 if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
135 LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
136 return false;
137 }
138
139 pcursor->Next();
140 } else {
141 LogError("%s: failed to read value\n", __func__);
142 return false;
143 }
144 } else {
145 break;
146 }
147 }
148
149 return true;
150}
151} // namespace kernel
152
153namespace node {
154
156{
157 // First sort by most total work, ...
158 if (pa->nChainWork > pb->nChainWork) return false;
159 if (pa->nChainWork < pb->nChainWork) return true;
160
161 // ... then by earliest time received, ...
162 if (pa->nSequenceId < pb->nSequenceId) return false;
163 if (pa->nSequenceId > pb->nSequenceId) return true;
164
165 // Use pointer address as tie breaker (should only happen with blocks
166 // loaded from disk, as those all have id 0).
167 if (pa < pb) return false;
168 if (pa > pb) return true;
169
170 // Identical blocks.
171 return false;
172}
173
175{
176 return pa->nHeight < pb->nHeight;
177}
178
179std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
180{
182 std::vector<CBlockIndex*> rv;
183 rv.reserve(m_block_index.size());
184 for (auto& [_, block_index] : m_block_index) {
185 rv.push_back(&block_index);
186 }
187 return rv;
188}
189
191{
193 BlockMap::iterator it = m_block_index.find(hash);
194 return it == m_block_index.end() ? nullptr : &it->second;
195}
196
198{
200 BlockMap::const_iterator it = m_block_index.find(hash);
201 return it == m_block_index.end() ? nullptr : &it->second;
202}
203
205{
207
208 auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
209 if (!inserted) {
210 return &mi->second;
211 }
212 CBlockIndex* pindexNew = &(*mi).second;
213
214 // We assign the sequence id to blocks only when the full data is available,
215 // to avoid miners withholding blocks but broadcasting headers, to get a
216 // competitive advantage.
217 pindexNew->nSequenceId = 0;
218
219 pindexNew->phashBlock = &((*mi).first);
220 BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
221 if (miPrev != m_block_index.end()) {
222 pindexNew->pprev = &(*miPrev).second;
223 pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
224 pindexNew->BuildSkip();
225 }
226 pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
227 pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
229 if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
230 best_header = pindexNew;
231 }
232
233 m_dirty_blockindex.insert(pindexNew);
234
235 return pindexNew;
236}
237
238void BlockManager::PruneOneBlockFile(const int fileNumber)
239{
242
243 for (auto& entry : m_block_index) {
244 CBlockIndex* pindex = &entry.second;
245 if (pindex->nFile == fileNumber) {
246 pindex->nStatus &= ~BLOCK_HAVE_DATA;
247 pindex->nStatus &= ~BLOCK_HAVE_UNDO;
248 pindex->nFile = 0;
249 pindex->nDataPos = 0;
250 pindex->nUndoPos = 0;
251 m_dirty_blockindex.insert(pindex);
252
253 // Prune from m_blocks_unlinked -- any block we prune would have
254 // to be downloaded again in order to consider its chain, at which
255 // point it would be considered as a candidate for
256 // m_blocks_unlinked or setBlockIndexCandidates.
257 auto range = m_blocks_unlinked.equal_range(pindex->pprev);
258 while (range.first != range.second) {
259 std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
260 range.first++;
261 if (_it->second == pindex) {
262 m_blocks_unlinked.erase(_it);
263 }
264 }
265 }
266 }
267
268 m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
269 m_dirty_fileinfo.insert(fileNumber);
270}
271
273 std::set<int>& setFilesToPrune,
274 int nManualPruneHeight,
275 const Chainstate& chain,
276 ChainstateManager& chainman)
277{
278 assert(IsPruneMode() && nManualPruneHeight > 0);
279
281 if (chain.m_chain.Height() < 0) {
282 return;
283 }
284
285 const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
286
287 int count = 0;
288 for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
289 const auto& fileinfo = m_blockfile_info[fileNumber];
290 if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
291 continue;
292 }
293
294 PruneOneBlockFile(fileNumber);
295 setFilesToPrune.insert(fileNumber);
296 count++;
297 }
298 LogPrintf("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
299 chain.GetRole(), last_block_can_prune, count);
300}
301
303 std::set<int>& setFilesToPrune,
304 int last_prune,
305 const Chainstate& chain,
306 ChainstateManager& chainman)
307{
309 // Distribute our -prune budget over all chainstates.
310 const auto target = std::max(
312 const uint64_t target_sync_height = chainman.m_best_header->nHeight;
313
314 if (chain.m_chain.Height() < 0 || target == 0) {
315 return;
316 }
317 if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
318 return;
319 }
320
321 const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune);
322
323 uint64_t nCurrentUsage = CalculateCurrentUsage();
324 // We don't check to prune until after we've allocated new space for files
325 // So we should leave a buffer under our target to account for another allocation
326 // before the next pruning.
327 uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
328 uint64_t nBytesToPrune;
329 int count = 0;
330
331 if (nCurrentUsage + nBuffer >= target) {
332 // On a prune event, the chainstate DB is flushed.
333 // To avoid excessive prune events negating the benefit of high dbcache
334 // values, we should not prune too rapidly.
335 // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
336 const auto chain_tip_height = chain.m_chain.Height();
337 if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
338 // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
339 static constexpr uint64_t average_block_size = 1000000; /* 1 MB */
340 const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
341 nBuffer += average_block_size * remaining_blocks;
342 }
343
344 for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
345 const auto& fileinfo = m_blockfile_info[fileNumber];
346 nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
347
348 if (fileinfo.nSize == 0) {
349 continue;
350 }
351
352 if (nCurrentUsage + nBuffer < target) { // are we below our target?
353 break;
354 }
355
356 // don't prune files that could have a block that's not within the allowable
357 // prune range for the chain being pruned.
358 if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
359 continue;
360 }
361
362 PruneOneBlockFile(fileNumber);
363 // Queue up the files for removal
364 setFilesToPrune.insert(fileNumber);
365 nCurrentUsage -= nBytesToPrune;
366 count++;
367 }
368 }
369
370 LogPrint(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
371 chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
372 (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
373 min_block_to_prune, last_block_can_prune, count);
374}
375
376void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
378 m_prune_locks[name] = lock_info;
379}
380
382{
384
385 if (hash.IsNull()) {
386 return nullptr;
387 }
388
389 const auto [mi, inserted]{m_block_index.try_emplace(hash)};
390 CBlockIndex* pindex = &(*mi).second;
391 if (inserted) {
392 pindex->phashBlock = &((*mi).first);
393 }
394 return pindex;
395}
396
397bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
398{
399 if (!m_block_tree_db->LoadBlockIndexGuts(
400 GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
401 return false;
402 }
403
404 if (snapshot_blockhash) {
405 const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
406 if (!maybe_au_data) {
407 m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
408 return false;
409 }
410 const AssumeutxoData& au_data = *Assert(maybe_au_data);
411 m_snapshot_height = au_data.height;
412 CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
413
414 // Since m_chain_tx_count (responsible for estimated progress) isn't persisted
415 // to disk, we must bootstrap the value for assumedvalid chainstates
416 // from the hardcoded assumeutxo chainparams.
417 base->m_chain_tx_count = au_data.m_chain_tx_count;
418 LogPrintf("[snapshot] set m_chain_tx_count=%d for %s\n", au_data.m_chain_tx_count, snapshot_blockhash->ToString());
419 } else {
420 // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
421 // is null. This is relevant during snapshot completion, when the blockman may be loaded
422 // with a height that then needs to be cleared after the snapshot is fully validated.
423 m_snapshot_height.reset();
424 }
425
426 Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
427
428 // Calculate nChainWork
429 std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
430 std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
432
433 CBlockIndex* previous_index{nullptr};
434 for (CBlockIndex* pindex : vSortedByHeight) {
435 if (m_interrupt) return false;
436 if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
437 LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
438 return false;
439 }
440 previous_index = pindex;
441 pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
442 pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
443
444 // We can link the chain of blocks for which we've received transactions at some point, or
445 // blocks that are assumed-valid on the basis of snapshot load (see
446 // PopulateAndValidateSnapshot()).
447 // Pruned nodes may have deleted the block.
448 if (pindex->nTx > 0) {
449 if (pindex->pprev) {
450 if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
451 pindex->GetBlockHash() == *snapshot_blockhash) {
452 // Should have been set above; don't disturb it with code below.
453 Assert(pindex->m_chain_tx_count > 0);
454 } else if (pindex->pprev->m_chain_tx_count > 0) {
455 pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx;
456 } else {
457 pindex->m_chain_tx_count = 0;
458 m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
459 }
460 } else {
461 pindex->m_chain_tx_count = pindex->nTx;
462 }
463 }
464 if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
465 pindex->nStatus |= BLOCK_FAILED_CHILD;
466 m_dirty_blockindex.insert(pindex);
467 }
468 if (pindex->pprev) {
469 pindex->BuildSkip();
470 }
471 }
472
473 return true;
474}
475
476bool BlockManager::WriteBlockIndexDB()
477{
479 std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
480 vFiles.reserve(m_dirty_fileinfo.size());
481 for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
482 vFiles.emplace_back(*it, &m_blockfile_info[*it]);
483 m_dirty_fileinfo.erase(it++);
484 }
485 std::vector<const CBlockIndex*> vBlocks;
486 vBlocks.reserve(m_dirty_blockindex.size());
487 for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
488 vBlocks.push_back(*it);
489 m_dirty_blockindex.erase(it++);
490 }
491 int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
492 if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
493 return false;
494 }
495 return true;
496}
497
498bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
499{
500 if (!LoadBlockIndex(snapshot_blockhash)) {
501 return false;
502 }
503 int max_blockfile_num{0};
504
505 // Load block file info
506 m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
507 m_blockfile_info.resize(max_blockfile_num + 1);
508 LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num);
509 for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
510 m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
511 }
512 LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString());
513 for (int nFile = max_blockfile_num + 1; true; nFile++) {
514 CBlockFileInfo info;
515 if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
516 m_blockfile_info.push_back(info);
517 } else {
518 break;
519 }
520 }
521
522 // Check presence of blk files
523 LogPrintf("Checking all blk files are present...\n");
524 std::set<int> setBlkDataFiles;
525 for (const auto& [_, block_index] : m_block_index) {
526 if (block_index.nStatus & BLOCK_HAVE_DATA) {
527 setBlkDataFiles.insert(block_index.nFile);
528 }
529 }
530 for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
531 FlatFilePos pos(*it, 0);
532 if (OpenBlockFile(pos, true).IsNull()) {
533 return false;
534 }
535 }
536
537 {
538 // Initialize the blockfile cursors.
540 for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
541 const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
542 m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
543 }
544 }
545
546 // Check whether we have ever pruned block & undo files
547 m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
548 if (m_have_pruned) {
549 LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
550 }
551
552 // Check whether we need to continue reindexing
553 bool fReindexing = false;
554 m_block_tree_db->ReadReindexing(fReindexing);
555 if (fReindexing) m_blockfiles_indexed = false;
556
557 return true;
558}
559
560void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
561{
563 int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
564 if (!m_have_pruned) {
565 return;
566 }
567
568 std::set<int> block_files_to_prune;
569 for (int file_number = 0; file_number < max_blockfile; file_number++) {
570 if (m_blockfile_info[file_number].nSize == 0) {
571 block_files_to_prune.insert(file_number);
572 }
573 }
574
575 UnlinkPrunedFiles(block_files_to_prune);
576}
577
579{
580 const MapCheckpoints& checkpoints = data.mapCheckpoints;
581
582 for (const MapCheckpoints::value_type& i : checkpoints | std::views::reverse) {
583 const uint256& hash = i.second;
584 const CBlockIndex* pindex = LookupBlockIndex(hash);
585 if (pindex) {
586 return pindex;
587 }
588 }
589 return nullptr;
590}
591
592bool BlockManager::IsBlockPruned(const CBlockIndex& block) const
593{
595 return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
596}
597
598const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const
599{
601 const CBlockIndex* last_block = &upper_block;
602 assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask
603 while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) {
604 if (lower_block) {
605 // Return if we reached the lower_block
606 if (last_block == lower_block) return lower_block;
607 // if range was surpassed, means that 'lower_block' is not part of the 'upper_block' chain
608 // and so far this is not allowed.
609 assert(last_block->nHeight >= lower_block->nHeight);
610 }
611 last_block = last_block->pprev;
612 }
613 assert(last_block != nullptr);
614 return last_block;
615}
616
617bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
618{
619 if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
620 return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block;
621}
622
623// If we're using -prune with -reindex, then delete block files that will be ignored by the
624// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
625// is missing, do the same here to delete any later block files after a gap. Also delete all
626// rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
627// is in sync with what's actually on disk by the time we start downloading, so that pruning
628// works correctly.
630{
631 std::map<std::string, fs::path> mapBlockFiles;
632
633 // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
634 // Remove the rev files immediately and insert the blk file paths into an
635 // ordered map keyed by block file index.
636 LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
637 for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
638 const std::string path = fs::PathToString(it->path().filename());
639 if (fs::is_regular_file(*it) &&
640 path.length() == 12 &&
641 path.substr(8,4) == ".dat")
642 {
643 if (path.substr(0, 3) == "blk") {
644 mapBlockFiles[path.substr(3, 5)] = it->path();
645 } else if (path.substr(0, 3) == "rev") {
646 remove(it->path());
647 }
648 }
649 }
650
651 // Remove all block files that aren't part of a contiguous set starting at
652 // zero by walking the ordered map (keys are block file indices) by
653 // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
654 // start removing block files.
655 int nContigCounter = 0;
656 for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
657 if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
658 nContigCounter++;
659 continue;
660 }
661 remove(item.second);
662 }
663}
664
671
672bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const
673{
674 // Open history file to append
675 AutoFile fileout{OpenUndoFile(pos)};
676 if (fileout.IsNull()) {
677 LogError("%s: OpenUndoFile failed\n", __func__);
678 return false;
679 }
680
681 // Write index header
682 unsigned int nSize = GetSerializeSize(blockundo);
683 fileout << GetParams().MessageStart() << nSize;
684
685 // Write undo data
686 long fileOutPos = fileout.tell();
687 if (fileOutPos < 0) {
688 LogError("%s: ftell failed\n", __func__);
689 return false;
690 }
691 pos.nPos = (unsigned int)fileOutPos;
692 fileout << blockundo;
693
694 // calculate & write checksum
695 HashWriter hasher{};
696 hasher << hashBlock;
697 hasher << blockundo;
698 fileout << hasher.GetHash();
699
700 return true;
701}
702
703bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const
704{
705 const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
706
707 // Open history file to read
708 AutoFile filein{OpenUndoFile(pos, true)};
709 if (filein.IsNull()) {
710 LogError("%s: OpenUndoFile failed for %s\n", __func__, pos.ToString());
711 return false;
712 }
713
714 // Read block
715 uint256 hashChecksum;
716 HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a
717 try {
718 verifier << index.pprev->GetBlockHash();
719 verifier >> blockundo;
720 filein >> hashChecksum;
721 } catch (const std::exception& e) {
722 LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
723 return false;
724 }
725
726 // Verify checksum
727 if (hashChecksum != verifier.GetHash()) {
728 LogError("%s: Checksum mismatch at %s\n", __func__, pos.ToString());
729 return false;
730 }
731
732 return true;
733}
734
735bool BlockManager::FlushUndoFile(int block_file, bool finalize)
736{
737 FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
738 if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) {
739 m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
740 return false;
741 }
742 return true;
743}
744
745bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
746{
747 bool success = true;
749
750 if (m_blockfile_info.size() < 1) {
751 // Return if we haven't loaded any blockfiles yet. This happens during
752 // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
753 // then calls FlushStateToDisk()), resulting in a call to this function before we
754 // have populated `m_blockfile_info` via LoadBlockIndexDB().
755 return true;
756 }
757 assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
758
759 FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
760 if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) {
761 m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
762 success = false;
763 }
764 // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
765 // e.g. during IBD or a sync after a node going offline
766 if (!fFinalize || finalize_undo) {
767 if (!FlushUndoFile(blockfile_num, finalize_undo)) {
768 success = false;
769 }
770 }
771 return success;
772}
773
781
783{
785 auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
786 // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
787 // but no blocks past the snapshot height have been written yet, so there
788 // is no data associated with the chainstate, and it is safe not to flush.
789 if (cursor) {
790 return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
791 }
792 // No need to log warnings in this case.
793 return true;
794}
795
797{
799
800 uint64_t retval = 0;
801 for (const CBlockFileInfo& file : m_blockfile_info) {
802 retval += file.nSize + file.nUndoSize;
803 }
804 return retval;
805}
806
807void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
808{
809 std::error_code ec;
810 for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
811 FlatFilePos pos(*it, 0);
812 const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)};
813 const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)};
814 if (removed_blockfile || removed_undofile) {
815 LogPrint(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
816 }
817 }
818}
819
820AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
821{
822 return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_xor_key};
823}
824
826AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
827{
828 return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_xor_key};
829}
830
835
836FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
837{
839
840 const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
841
842 if (!m_blockfile_cursors[chain_type]) {
843 // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
844 assert(chain_type == BlockfileType::ASSUMED);
845 const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
846 m_blockfile_cursors[chain_type] = new_cursor;
847 LogPrint(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
848 }
849 const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
850
851 int nFile = last_blockfile;
852 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
853 m_blockfile_info.resize(nFile + 1);
854 }
855
856 bool finalize_undo = false;
857 unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
858 // Use smaller blockfiles in test-only -fastprune mode - but avoid
859 // the possibility of having a block not fit into the block file.
860 if (m_opts.fast_prune) {
861 max_blockfile_size = 0x10000; // 64kiB
862 if (nAddSize >= max_blockfile_size) {
863 // dynamically adjust the blockfile size to be larger than the added size
864 max_blockfile_size = nAddSize + 1;
865 }
866 }
867 assert(nAddSize < max_blockfile_size);
868
869 while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
870 // when the undo file is keeping up with the block file, we want to flush it explicitly
871 // when it is lagging behind (more blocks arrive than are being connected), we let the
872 // undo block write case handle it
873 finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
874 Assert(m_blockfile_cursors[chain_type])->undo_height);
875
876 // Try the next unclaimed blockfile number
877 nFile = this->MaxBlockfileNum() + 1;
878 // Set to increment MaxBlockfileNum() for next iteration
879 m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
880
881 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
882 m_blockfile_info.resize(nFile + 1);
883 }
884 }
885 FlatFilePos pos;
886 pos.nFile = nFile;
887 pos.nPos = m_blockfile_info[nFile].nSize;
888
889 if (nFile != last_blockfile) {
890 LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
891 last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
892
893 // Do not propagate the return code. The flush concerns a previous block
894 // and undo file that has already been written to. If a flush fails
895 // here, and we crash, there is no expected additional block data
896 // inconsistency arising from the flush failure here. However, the undo
897 // data may be inconsistent after a crash if the flush is called during
898 // a reindex. A flush error might also leave some of the data files
899 // untrimmed.
900 if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
902 "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
903 last_blockfile, finalize_undo, nFile);
904 }
905 // No undo data yet in the new file, so reset our undo-height tracking.
906 m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
907 }
908
909 m_blockfile_info[nFile].AddBlock(nHeight, nTime);
910 m_blockfile_info[nFile].nSize += nAddSize;
911
912 bool out_of_space;
913 size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space);
914 if (out_of_space) {
915 m_opts.notifications.fatalError(_("Disk space is too low!"));
916 return {};
917 }
918 if (bytes_allocated != 0 && IsPruneMode()) {
919 m_check_for_pruning = true;
920 }
921
922 m_dirty_fileinfo.insert(nFile);
923 return pos;
924}
925
926void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
927{
929
930 // Update the cursor so it points to the last file.
932 auto& cursor{m_blockfile_cursors[chain_type]};
933 if (!cursor || cursor->file_num < pos.nFile) {
934 m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
935 }
936
937 // Update the file information with the current block.
938 const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
939 const int nFile = pos.nFile;
940 if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
941 m_blockfile_info.resize(nFile + 1);
942 }
943 m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
944 m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
945 m_dirty_fileinfo.insert(nFile);
946}
947
948bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
949{
950 pos.nFile = nFile;
951
953
954 pos.nPos = m_blockfile_info[nFile].nUndoSize;
955 m_blockfile_info[nFile].nUndoSize += nAddSize;
956 m_dirty_fileinfo.insert(nFile);
957
958 bool out_of_space;
959 size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space);
960 if (out_of_space) {
961 return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
962 }
963 if (bytes_allocated != 0 && IsPruneMode()) {
964 m_check_for_pruning = true;
965 }
966
967 return true;
968}
969
971{
972 // Open history file to append
973 AutoFile fileout{OpenBlockFile(pos)};
974 if (fileout.IsNull()) {
975 LogError("%s: OpenBlockFile failed\n", __func__);
976 return false;
977 }
978
979 // Write index header
980 unsigned int nSize = GetSerializeSize(TX_WITH_WITNESS(block));
981 fileout << GetParams().MessageStart() << nSize;
982
983 // Write block
984 long fileOutPos = fileout.tell();
985 if (fileOutPos < 0) {
986 LogError("%s: ftell failed\n", __func__);
987 return false;
988 }
989 pos.nPos = (unsigned int)fileOutPos;
990 fileout << TX_WITH_WITNESS(block);
991
992 return true;
993}
994
995bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
996{
998 const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
999 auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
1000
1001 // Write undo information to disk
1002 if (block.GetUndoPos().IsNull()) {
1003 FlatFilePos _pos;
1004 if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo) + 40)) {
1005 LogError("%s: FindUndoPos failed\n", __func__);
1006 return false;
1007 }
1008 if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash())) {
1009 return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
1010 }
1011 // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
1012 // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
1013 // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
1014 // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
1015 // the FindNextBlockPos function
1016 if (_pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
1017 // Do not propagate the return code, a failed flush here should not
1018 // be an indication for a failed write. If it were propagated here,
1019 // the caller would assume the undo data not to be written, when in
1020 // fact it is. Note though, that a failed flush might leave the data
1021 // file untrimmed.
1022 if (!FlushUndoFile(_pos.nFile, true)) {
1023 LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile);
1024 }
1025 } else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
1026 cursor.undo_height = block.nHeight;
1027 }
1028 // update nUndoPos in block index
1029 block.nUndoPos = _pos.nPos;
1030 block.nStatus |= BLOCK_HAVE_UNDO;
1031 m_dirty_blockindex.insert(&block);
1032 }
1033
1034 return true;
1035}
1036
1038{
1039 block.SetNull();
1040
1041 // Open history file to read
1042 AutoFile filein{OpenBlockFile(pos, true)};
1043 if (filein.IsNull()) {
1044 LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1045 return false;
1046 }
1047
1048 // Read block
1049 try {
1050 filein >> TX_WITH_WITNESS(block);
1051 } catch (const std::exception& e) {
1052 LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
1053 return false;
1054 }
1055
1056 // Check the header
1057 if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) {
1058 LogError("%s: Errors in block header at %s\n", __func__, pos.ToString());
1059 return false;
1060 }
1061
1062 // Signet only: check block solution
1063 if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
1064 LogError("%s: Errors in block solution at %s\n", __func__, pos.ToString());
1065 return false;
1066 }
1067
1068 return true;
1069}
1070
1072{
1073 const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1074
1075 if (!ReadBlockFromDisk(block, block_pos)) {
1076 return false;
1077 }
1078 if (block.GetHash() != index.GetBlockHash()) {
1079 LogError("%s: GetHash() doesn't match index for %s at %s\n", __func__, index.ToString(), block_pos.ToString());
1080 return false;
1081 }
1082 return true;
1083}
1084
1085bool BlockManager::ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos) const
1086{
1087 FlatFilePos hpos = pos;
1088 // If nPos is less than 8 the pos is null and we don't have the block data
1089 // Return early to prevent undefined behavior of unsigned int underflow
1090 if (hpos.nPos < 8) {
1091 LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1092 return false;
1093 }
1094 hpos.nPos -= 8; // Seek back 8 bytes for meta header
1095 AutoFile filein{OpenBlockFile(hpos, true)};
1096 if (filein.IsNull()) {
1097 LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1098 return false;
1099 }
1100
1101 try {
1102 MessageStartChars blk_start;
1103 unsigned int blk_size;
1104
1105 filein >> blk_start >> blk_size;
1106
1107 if (blk_start != GetParams().MessageStart()) {
1108 LogError("%s: Block magic mismatch for %s: %s versus expected %s\n", __func__, pos.ToString(),
1109 HexStr(blk_start),
1110 HexStr(GetParams().MessageStart()));
1111 return false;
1112 }
1113
1114 if (blk_size > MAX_SIZE) {
1115 LogError("%s: Block data is larger than maximum deserialization size for %s: %s versus %s\n", __func__, pos.ToString(),
1116 blk_size, MAX_SIZE);
1117 return false;
1118 }
1119
1120 block.resize(blk_size); // Zeroing of memory is intentional here
1121 filein.read(MakeWritableByteSpan(block));
1122 } catch (const std::exception& e) {
1123 LogError("%s: Read from block file failed: %s for %s\n", __func__, e.what(), pos.ToString());
1124 return false;
1125 }
1126
1127 return true;
1128}
1129
1131{
1132 unsigned int nBlockSize = ::GetSerializeSize(TX_WITH_WITNESS(block));
1133 // Account for the 4 magic message start bytes + the 4 length bytes (8 bytes total,
1134 // defined as BLOCK_SERIALIZATION_HEADER_SIZE)
1135 nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
1136 FlatFilePos blockPos{FindNextBlockPos(nBlockSize, nHeight, block.GetBlockTime())};
1137 if (blockPos.IsNull()) {
1138 LogError("%s: FindNextBlockPos failed\n", __func__);
1139 return FlatFilePos();
1140 }
1141 if (!WriteBlockToDisk(block, blockPos)) {
1142 m_opts.notifications.fatalError(_("Failed to write block."));
1143 return FlatFilePos();
1144 }
1145 return blockPos;
1146}
1147
1149{
1150 // Bytes are serialized without length indicator, so this is also the exact
1151 // size of the XOR-key file.
1152 std::array<std::byte, 8> xor_key{};
1153
1154 if (opts.use_xor && fs::is_empty(opts.blocks_dir)) {
1155 // Only use random fresh key when the boolean option is set and on the
1156 // very first start of the program.
1157 FastRandomContext{}.fillrand(xor_key);
1158 }
1159
1160 const fs::path xor_key_path{opts.blocks_dir / "xor.dat"};
1161 if (fs::exists(xor_key_path)) {
1162 // A pre-existing xor key file has priority.
1163 AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")};
1164 xor_key_file >> xor_key;
1165 } else {
1166 // Create initial or missing xor key file
1167 AutoFile xor_key_file{fsbridge::fopen(xor_key_path,
1168#ifdef __MINGW64__
1169 "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210
1170#else
1171 "wbx"
1172#endif
1173 )};
1174 xor_key_file << xor_key;
1175 }
1176 // If the user disabled the key, it must be zero.
1177 if (!opts.use_xor && xor_key != decltype(xor_key){}) {
1178 throw std::runtime_error{
1179 strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! "
1180 "Stored key: '%s', stored path: '%s'.",
1181 HexStr(xor_key), fs::PathToString(xor_key_path)),
1182 };
1183 }
1184 LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(xor_key));
1185 return std::vector<std::byte>{xor_key.begin(), xor_key.end()};
1186}
1187
1189 : m_prune_mode{opts.prune_target > 0},
1190 m_xor_key{InitBlocksdirXorKey(opts)},
1191 m_opts{std::move(opts)},
1192 m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
1193 m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
1194 m_interrupt{interrupt} {}
1195
// RAII guard that flips an "importing" flag on for its lifetime.
// The asserts document that imports must not be nested or run concurrently.
class ImportingNow
{
    std::atomic<bool>& m_importing;

public:
    ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
    {
        assert(m_importing == false);
        m_importing = true;
    }
    ~ImportingNow()
    {
        assert(m_importing == true);
        m_importing = false;
    }
};
1212
1213void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFiles)
1214{
1215 ImportingNow imp{chainman.m_blockman.m_importing};
1216
1217 // -reindex
1218 if (!chainman.m_blockman.m_blockfiles_indexed) {
1219 int nFile = 0;
1220 // Map of disk positions for blocks with unknown parent (only used for reindex);
1221 // parent hash -> child disk position, multiple children can have the same parent.
1222 std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
1223 while (true) {
1224 FlatFilePos pos(nFile, 0);
1225 if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
1226 break; // No block files left to reindex
1227 }
1228 AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)};
1229 if (file.IsNull()) {
1230 break; // This error is logged in OpenBlockFile
1231 }
1232 LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
1233 chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
1234 if (chainman.m_interrupt) {
1235 LogPrintf("Interrupt requested. Exit %s\n", __func__);
1236 return;
1237 }
1238 nFile++;
1239 }
1240 WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1241 chainman.m_blockman.m_blockfiles_indexed = true;
1242 LogPrintf("Reindexing finished\n");
1243 // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
1244 chainman.ActiveChainstate().LoadGenesisBlock();
1245 }
1246
1247 // -loadblock=
1248 for (const fs::path& path : vImportFiles) {
1249 AutoFile file{fsbridge::fopen(path, "rb")};
1250 if (!file.IsNull()) {
1251 LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
1252 chainman.LoadExternalBlockFile(file);
1253 if (chainman.m_interrupt) {
1254 LogPrintf("Interrupt requested. Exit %s\n", __func__);
1255 return;
1256 }
1257 } else {
1258 LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
1259 }
1260 }
1261
1262 // scan for better chains in the block chain database, that are not yet connected in the active best chain
1263
1264 // We can't hold cs_main during ActivateBestChain even though we're accessing
1265 // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
1266 // the relevant pointers before the ABC call.
1267 for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
1269 if (!chainstate->ActivateBestChain(state, nullptr)) {
1270 chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
1271 return;
1272 }
1273 }
1274 // End scope of ImportingNow
1275}
1276
1277std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
1278 switch(type) {
1279 case BlockfileType::NORMAL: os << "normal"; break;
1280 case BlockfileType::ASSUMED: os << "assumed"; break;
1281 default: os.setstate(std::ios_base::failbit);
1282 }
1283 return os;
1284}
1285
1286std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
1287 os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1288 return os;
1289}
1290} // namespace node
arith_uint256 GetBlockProof(const CBlockIndex &block)
Definition chain.cpp:131
@ BLOCK_VALID_TREE
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
Definition chain.h:97
@ BLOCK_HAVE_UNDO
undo data available in rev*.dat
Definition chain.h:122
@ BLOCK_HAVE_DATA
full block available in blk*.dat
Definition chain.h:121
@ BLOCK_FAILED_CHILD
descends from failed block
Definition chain.h:126
@ BLOCK_FAILED_MASK
Definition chain.h:127
#define Assert(val)
Identity function.
Definition check.h:77
Non-refcounted RAII wrapper for FILE*.
Definition streams.h:389
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition block.h:22
uint32_t nBits
Definition block.h:29
int64_t GetBlockTime() const
Definition block.h:61
uint256 hashPrevBlock
Definition block.h:26
uint256 GetHash() const
Definition block.cpp:11
Definition block.h:69
void SetNull()
Definition block.h:95
The block chain is a tree shaped structure starting with the genesis block at the root,...
Definition chain.h:141
uint256 hashMerkleRoot
Definition chain.h:188
std::string ToString() const
Definition chain.cpp:15
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition chain.h:147
uint64_t m_chain_tx_count
(memory only) Number of transactions in the chain up to and including this block.
Definition chain.h:176
void BuildSkip()
Build the skiplist pointer for this entry.
Definition chain.cpp:125
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition chain.h:165
uint32_t nTime
Definition chain.h:189
unsigned int nTimeMax
(memory only) Maximum nTime in the chain up to and including this block.
Definition chain.h:197
int32_t nSequenceId
(memory only) Sequential id assigned to distinguish order in which blocks are received.
Definition chain.h:194
uint32_t nNonce
Definition chain.h:191
uint256 GetBlockHash() const
Definition chain.h:243
FlatFilePos GetUndoPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition chain.h:219
uint32_t nBits
Definition chain.h:190
bool RaiseValidity(enum BlockStatus nUpTo) EXCLUSIVE_LOCKS_REQUIRED(
Raise the validity level of this block index entry.
Definition chain.h:307
unsigned int nTx
Number of transactions in this block.
Definition chain.h:170
int32_t nVersion
block header
Definition chain.h:187
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition chain.h:153
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition chain.h:208
const uint256 * phashBlock
pointer to the hash of the block, if any. Memory is owned by this CBlockIndex
Definition chain.h:144
Undo information for a CBlock.
Definition undo.h:63
int Height() const
Return the maximal height in the chain.
Definition chain.h:462
const MessageStartChars & MessageStart() const
Definition chainparams.h:94
std::optional< AssumeutxoData > AssumeutxoForBlockhash(const uint256 &blockhash) const
uint64_t PruneAfterHeight() const
Batch of changes queued to be written to a CDBWrapper.
Definition dbwrapper.h:73
void Write(const K &key, const V &value)
Definition dbwrapper.h:99
bool WriteBatch(CDBBatch &batch, bool fSync=false)
bool Read(const K &key, V &value) const
Definition dbwrapper.h:221
CDBIterator * NewIterator()
bool Erase(const K &key, bool fSync=false)
Definition dbwrapper.h:266
bool Write(const K &key, const V &value, bool fSync=false)
Definition dbwrapper.h:241
bool Exists(const K &key) const
Definition dbwrapper.h:257
Used to marshal pointers into hashes for db storage.
Definition chain.h:355
uint256 hashPrev
Definition chain.h:365
uint256 ConstructBlockHash() const
Definition chain.h:399
Chainstate stores and provides an API to update our local knowledge of the current best chain.
Definition validation.h:513
CChain m_chain
The current chain of blockheaders we consult and build on.
Definition validation.h:593
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition validation.h:871
SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex *GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(Chainstate ActiveChainstate)() const
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
kernel::Notifications & GetNotifications() const
Definition validation.h:986
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network)
const util::SignalInterrupt & m_interrupt
void LoadExternalBlockFile(AutoFile &file_in, FlatFilePos *dbp=nullptr, std::multimap< uint256, FlatFilePos > *blocks_with_unknown_parent=nullptr)
Import blocks from an external file.
const CChainParams & GetParams() const
Definition validation.h:981
Chainstate &InitializeChainstate(CTxMemPool *mempool) EXCLUSIVE_LOCKS_REQUIRED(std::vector< Chainstate * GetAll)()
Instantiate a new chainstate.
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
Fast randomness source.
Definition random.h:377
void fillrand(Span< std::byte > output) noexcept
Fill a byte Span with random bytes.
Definition random.cpp:699
FlatFileSeq represents a sequence of numbered files storing raw data.
Definition flatfile.h:46
FILE * Open(const FlatFilePos &pos, bool read_only=false) const
Open a handle to the file at the given position.
Definition flatfile.cpp:33
fs::path FileName(const FlatFilePos &pos) const
Get the name of the file at the given position.
Definition flatfile.cpp:28
bool Flush(const FlatFilePos &pos, bool finalize=false) const
Commit a file to disk, and optionally truncate off extra pre-allocated bytes if final.
Definition flatfile.cpp:81
size_t Allocate(const FlatFilePos &pos, size_t add_size, bool &out_of_space) const
Allocate additional space in a file after the given starting position.
Definition flatfile.cpp:55
Reads data from an underlying stream, while hashing the read data.
Definition hash.h:151
A writer stream (for serialization) that computes a 256-bit hash.
Definition hash.h:101
std::string ToString() const
Definition validation.h:128
constexpr bool IsNull() const
Definition uint256.h:46
Path class wrapper to block calls to the fs::path(std::string) implicit constructor and the fs::path:...
Definition fs.h:33
bool ReadLastBlockFile(int &nFile)
bool ReadFlag(const std::string &name, bool &fValue)
bool ReadBlockFileInfo(int nFile, CBlockFileInfo &info)
void ReadReindexing(bool &fReindexing)
bool WriteFlag(const std::string &name, bool fValue)
bool WriteBatchSync(const std::vector< std::pair< int, const CBlockFileInfo * > > &fileInfo, int nLastFile, const std::vector< const CBlockIndex * > &blockinfo)
bool WriteReindexing(bool fReindexing)
virtual void fatalError(const bilingual_str &message)
The fatal error notification is sent to notify the user when an error occurs in kernel code that can'...
virtual void flushError(const bilingual_str &message)
The flush error notification is sent to notify the user that an error occurred while flushing block d...
const kernel::BlockManagerOpts m_opts
std::set< int > m_dirty_fileinfo
Dirty block file entries.
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos) const
Functions for disk access for blocks.
const FlatFileSeq m_undo_file_seq
RecursiveMutex cs_LastBlockFile
const CChainParams & GetParams() const
bool FlushChainstateBlockFile(int tip_height)
void FindFilesToPrune(std::set< int > &setFilesToPrune, int last_prune, const Chainstate &chain, ChainstateManager &chainman)
Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a use...
void UpdateBlockInfo(const CBlock &block, unsigned int nHeight, const FlatFilePos &pos)
Update blockfile info while processing a block during reindex.
CBlockIndex * LookupBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Mark one block file as pruned (modify associated database entries)
BlockfileType BlockfileTypeForHeight(int height)
std::atomic_bool m_blockfiles_indexed
Whether all blockfiles have been added to the block tree database.
std::vector< CBlockIndex * > GetAllBlockIndices() EXCLUSIVE_LOCKS_REQUIRED(std::multimap< CBlockIndex *, CBlockIndex * > m_blocks_unlinked
All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
const Consensus::Params & GetConsensus() const
BlockManager(const util::SignalInterrupt &interrupt, Options opts)
bool ReadRawBlockFromDisk(std::vector< uint8_t > &block, const FlatFilePos &pos) const
std::set< CBlockIndex * > m_dirty_blockindex
Dirty block index entries.
bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex &index) const
const std::vector< std::byte > m_xor_key
fs::path GetBlockPosFilename(const FlatFilePos &pos) const
Translation to a filesystem path.
bool FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
Return false if block file or undo file flushing fails.
uint64_t GetPruneTarget() const
Attempt to stay below this number of bytes of block files.
bool WriteBlockToDisk(const CBlock &block, FlatFilePos &pos) const
Write a block to disk.
int MaxBlockfileNum() const EXCLUSIVE_LOCKS_REQUIRED(cs_LastBlockFile)
void UnlinkPrunedFiles(const std::set< int > &setFilesToPrune) const
Actually unlink the specified files.
FlatFilePos FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
Helper function performing various preparations before a block can be saved to disk: Returns the corr...
bool CheckBlockDataAvailability(const CBlockIndex &upper_block LIFETIMEBOUND, const CBlockIndex &lower_block LIFETIMEBOUND) EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex *GetFirstBlock(const CBlockIndex &upper_block LIFETIMEBOUND, uint32_t status_mask, const CBlockIndex *lower_block=nullptr) const EXCLUSIVE_LOCKS_REQUIRED(boo m_have_pruned)
Check if all blocks in the [upper_block, lower_block] range have data available.
bool FlushUndoFile(int block_file, bool finalize=false)
Return false if undo file flushing fails.
uint64_t CalculateCurrentUsage()
Calculate the amount of disk space the block & undo files currently use.
const util::SignalInterrupt & m_interrupt
const CBlockIndex * GetLastCheckpoint(const CCheckpointData &data) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Returns last CBlockIndex* that is a checkpoint.
bool IsBlockPruned(const CBlockIndex &block) const EXCLUSIVE_LOCKS_REQUIRED(void UpdatePruneLock(const std::string &name, const PruneLockInfo &lock_info) EXCLUSIVE_LOCKS_REQUIRED(AutoFil OpenBlockFile)(const FlatFilePos &pos, bool fReadOnly=false) const
Check whether the block associated with this index entry is pruned or not.
const FlatFileSeq m_block_file_seq
CBlockIndex * InsertBlockIndex(const uint256 &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Create a new block index entry for a given block hash.
bool WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(bool LoadBlockIndexDB(const std::optional< uint256 > &snapshot_blockhash) EXCLUSIVE_LOCKS_REQUIRED(void ScanAndUnlinkAlreadyPrunedFiles() EXCLUSIVE_LOCKS_REQUIRED(CBlockIndex AddToBlockIndex)(const CBlockHeader &block, CBlockIndex *&best_header) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Remove any pruned block & undo files that are still on disk.
bool m_check_for_pruning
Global flag to indicate we should check to see if there are block/undo files that should be deleted.
bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
bool IsPruneMode() const
Whether running in -prune mode.
void CleanupBlockRevFiles() const
void FindFilesToPruneManual(std::set< int > &setFilesToPrune, int nManualPruneHeight, const Chainstate &chain, ChainstateManager &chainman)
std::atomic< bool > m_importing
std::vector< CBlockFileInfo > m_blockfile_info
bool UndoWriteToDisk(const CBlockUndo &blockundo, FlatFilePos &pos, const uint256 &hashBlock) const
bool WriteUndoDataForBlock(const CBlockUndo &blockundo, BlockValidationState &state, CBlockIndex &block) EXCLUSIVE_LOCKS_REQUIRED(FlatFilePo SaveBlockToDisk)(const CBlock &block, int nHeight)
Store block on disk and update block file statistics.
CBlockFileInfo * GetBlockFileInfo(size_t n)
Get block file info entry for one block file.
bool LoadBlockIndex(const std::optional< uint256 > &snapshot_blockhash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Load the blocktree off disk and into memory.
AutoFile OpenUndoFile(const FlatFilePos &pos, bool fReadOnly=false) const
Open an undo file (rev?????.dat)
std::optional< int > m_snapshot_height
The height of the base block of an assumeutxo snapshot, if one is in use.
ImportingNow(std::atomic< bool > &importing)
std::atomic< bool > & m_importing
256-bit opaque blob.
Definition uint256.h:178
Helper class that manages an interrupt flag, and allows a thread or signal to interrupt another threa...
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
Definition cs_main.cpp:8
std::string HexStr(const Span< const uint8_t > s)
Convert a span of bytes to a lower-case hexadecimal string.
Definition hex_base.cpp:29
std::map< int, uint256 > MapCheckpoints
Definition chainparams.h:27
#define LogPrintLevel(category, level,...)
Definition logging.h:281
#define LogPrint(category,...)
Definition logging.h:293
#define LogInfo(...)
Definition logging.h:269
#define LogError(...)
Definition logging.h:271
#define LogPrintf(...)
Definition logging.h:274
unsigned int nHeight
std::array< uint8_t, 4 > MessageStartChars
@ BLOCKSTORAGE
Definition logging.h:69
@ PRUNE
Definition logging.h:56
static bool exists(const path &p)
Definition fs.h:89
static std::string PathToString(const path &path)
Convert path object to a byte string.
Definition fs.h:151
FILE * fopen(const fs::path &p, const char *mode)
Definition fs.cpp:26
static constexpr uint8_t DB_REINDEX_FLAG
static constexpr uint8_t DB_FLAG
static constexpr uint8_t DB_BLOCK_INDEX
static constexpr uint8_t DB_LAST_BLOCK
static constexpr uint8_t DB_BLOCK_FILES
static const unsigned int UNDOFILE_CHUNK_SIZE
The pre-allocation chunk size for rev?????.dat files (since 0.8)
BlockfileType
void ImportBlocks(ChainstateManager &chainman, std::vector< fs::path > vImportFiles)
static auto InitBlocksdirXorKey(const BlockManager::Options &opts)
static const unsigned int BLOCKFILE_CHUNK_SIZE
The pre-allocation chunk size for blk?????.dat files (since 0.8)
std::ostream & operator<<(std::ostream &os, const BlockfileType &type)
static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE
Size of header written by WriteBlockToDisk before a serialized CBlock.
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params &params)
Check whether a block hash satisfies the proof-of-work requirement specified by nBits.
Definition pow.cpp:137
static constexpr TransactionSerParams TX_WITH_WITNESS
const char * name
Definition rest.cpp:49
size_t GetSerializeSize(const T &t)
Definition serialize.h:1101
static constexpr uint64_t MAX_SIZE
The maximum size of a serialized object in bytes or number of elements (for eg vectors) when the size...
Definition serialize.h:32
bool CheckSignetBlockSolution(const CBlock &block, const Consensus::Params &consensusParams)
Extract signature and check whether a block has a valid solution.
Definition signet.cpp:124
Span< std::byte > MakeWritableByteSpan(V &&v) noexcept
Definition span.h:282
T LocaleIndependentAtoi(std::string_view str)
Holds configuration for use during UTXO snapshot load and validation.
Definition chainparams.h:47
uint64_t m_chain_tx_count
Used to populate the m_chain_tx_count value, which is used during BlockManager::LoadBlockIndex().
Definition chainparams.h:57
MapCheckpoints mapCheckpoints
Definition chainparams.h:30
Parameters that influence chain consensus.
Definition params.h:74
std::string ToString() const
Definition flatfile.cpp:23
unsigned int nPos
Definition flatfile.h:17
bool IsNull() const
Definition flatfile.h:36
An options struct for BlockManager, more ergonomically referred to as BlockManager::Options due to th...
bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const
bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const
#define LOCK2(cs1, cs2)
Definition sync.h:258
#define LOCK(cs)
Definition sync.h:257
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition sync.h:301
#define AssertLockHeld(cs)
Definition sync.h:142
static int count
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
bilingual_str _(ConstevalStringLiteral str)
Translation function.
Definition translation.h:80
bool FatalError(Notifications &notifications, BlockValidationState &state, const bilingual_str &message)
assert(!tx.IsCoinBase())
static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES
Definition validation.h:79