// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"errors"
	"fmt"
	"maps"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/olekukonko/tablewriter"
)

// freezerdb is a database wrapper that enables ancient chain segment freezing.
type freezerdb struct {
	ethdb.KeyValueStore
	*chainFreezer

	readOnly    bool
	ancientRoot string
}

// AncientDatadir returns the path of the root ancient directory.
func (frdb *freezerdb) AncientDatadir() (string, error) {
	return frdb.ancientRoot, nil
}

// Close implements io.Closer, closing both the fast key-value store as well as
// the slow ancient tables.
func (frdb *freezerdb) Close() error {
	var errs []error
	if err := frdb.chainFreezer.Close(); err != nil {
		errs = append(errs, err)
	}
	if err := frdb.KeyValueStore.Close(); err != nil {
		errs = append(errs, err)
	}
	if len(errs) != 0 {
		return fmt.Errorf("%v", errs)
	}
	return nil
}

// Freeze is a helper method used for external testing to trigger and block until
// a freeze cycle completes, without having to sleep for a minute to trigger the
// automatic background run.
func (frdb *freezerdb) Freeze() error {
	if frdb.readOnly {
		return errReadOnly
	}
	// Trigger a freeze cycle and block until it's done
	trigger := make(chan struct{}, 1)
	frdb.chainFreezer.trigger <- trigger
	<-trigger
	return nil
}
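
// As a hypothetical test sketch (not part of the original file): from inside
// this package, a test can open a freezer-backed database and drive one
// synchronous freeze cycle via the Freeze helper above, instead of waiting
// for the background run. The kvStore and ancientDir variables are assumed
// placeholders.
//
//	db, err := NewDatabaseWithFreezer(kvStore, ancientDir, "", false)
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer db.Close()
//	// Blocks until the background freezer has completed one cycle.
//	if err := db.(*freezerdb).Freeze(); err != nil {
//		t.Fatal(err)
//	}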

// nofreezedb is a database wrapper that disables freezer data retrievals.
type nofreezedb struct {
	ethdb.KeyValueStore
}

// HasAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
	return false, errNotSupported
}

// Ancient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
	return nil, errNotSupported
}

// AncientRange returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
	return nil, errNotSupported
}

// Ancients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancients() (uint64, error) {
	return 0, errNotSupported
}

// Tail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Tail() (uint64, error) {
	return 0, errNotSupported
}

// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
	return 0, errNotSupported
}

// ModifyAncients is not supported.
func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
	return 0, errNotSupported
}

// TruncateHead returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) {
	return 0, errNotSupported
}

// TruncateTail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) {
	return 0, errNotSupported
}

// Sync returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Sync() error {
	return errNotSupported
}

func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
	// Unlike other ancient-related methods, this method does not return
	// errNotSupported when invoked.
	// The reason for this is that the caller might want to do several things:
	// 1. Check if something is in the freezer,
	// 2. If not, check leveldb.
	//
	// This will work, since the ancient-checks inside 'fn' will return errors,
	// and the leveldb work will continue.
	//
	// If we instead were to return errNotSupported here, then the caller would
	// have to explicitly check for that, having an extra clause to do the
	// non-ancient operations.
	return fn(db)
}
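
// To illustrate the fallback pattern described in the comment above, a reader
// in this package can try the freezer first and then the key-value store; this
// works unchanged on a nofreezedb because the Ancient call simply errors out
// and the key-value lookup still runs. A minimal sketch (number is an assumed
// uint64 variable):
//
//	var hash []byte
//	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
//		hash, _ = reader.Ancient(ChainFreezerHashTable, number)
//		if len(hash) == 0 {
//			hash, _ = db.Get(headerHashKey(number))
//		}
//		return nil
//	})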

// AncientDatadir returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientDatadir() (string, error) {
	return "", errNotSupported
}

// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
	return &nofreezedb{KeyValueStore: db}
}

// resolveChainFreezerDir is a helper function which resolves the absolute path
// of the chain freezer, taking backward compatibility into account.
func resolveChainFreezerDir(ancient string) string {
	// Check if the chain freezer is already present in the specified
	// sub folder; if not, there are two possibilities:
	// - the chain freezer is not initialized
	// - the chain freezer exists in the legacy location (the root ancient folder)
	freezer := filepath.Join(ancient, ChainFreezerName)
	if !common.FileExist(freezer) {
		if !common.FileExist(ancient) {
			// The entire ancient store is not initialized, still use the sub
			// folder for initialization.
		} else {
			// The ancient root is already initialized, so we assume the chain
			// freezer is also initialized and located in the root folder.
			// In this case, fall back to the legacy location.
			freezer = ancient
			log.Info("Found legacy ancient chain path", "location", ancient)
		}
	}
	return freezer
}
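
// As a worked illustration (assuming ChainFreezerName is "chain", its value in
// upstream go-ethereum):
//
//	resolveChainFreezerDir("/data/ancient")
//	// fresh datadir                      -> "/data/ancient/chain"
//	// legacy tables directly in the root -> "/data/ancient" (logged as legacy)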

// NewDatabaseWithFreezer creates a high level database on top of a given key-
// value data store with a freezer moving immutable chain segments into cold
// storage. The passed ancient indicates the path of the root ancient directory
// where the chain freezer can be opened.
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
	// Create the idle freezer instance. If the given ancient directory is empty,
	// an in-memory chain freezer is used (e.g. dev mode); otherwise the regular
	// file-based freezer is created.
	chainFreezerDir := ancient
	if chainFreezerDir != "" {
		chainFreezerDir = resolveChainFreezerDir(chainFreezerDir)
	}
	frdb, err := newChainFreezer(chainFreezerDir, namespace, readonly)
	if err != nil {
		printChainMetadata(db)
		return nil, err
	}
	// Since the freezer can be stored separately from the user's key-value database,
	// there's a fairly high probability that the user requests invalid combinations
	// of the freezer and database. Ensure that we don't shoot ourselves in the foot
	// by serving up conflicting data, leading to both datastores getting corrupted.
	//
	// - If both the freezer and key-value store are empty (no genesis), we just
	//   initialized a new empty freezer, so everything's fine.
	// - If the key-value store is empty, but the freezer is not, we need to make
	//   sure the user's genesis matches the freezer. That will be checked in the
	//   blockchain, since we don't have the genesis block here (nor should we at
	//   this point care, the key-value/freezer combo is valid).
	// - If neither the key-value store nor the freezer is empty, cross validate
	//   the genesis hashes to make sure they are compatible. If they are, also
	//   ensure that there's no gap between the freezer and the subsequent leveldb
	//   data.
	// - If the key-value store is not empty, but the freezer is, we might just be
	//   upgrading to the freezer release, or we might have had a small chain and
	//   not frozen anything yet. Ensure that no blocks are missing yet from the
	//   key-value store, since that would mean we already had an old freezer.

	// If the genesis hash is empty, we have a new key-value store, so nothing to
	// validate in this method. If, however, the genesis hash is not nil, compare
	// it to the freezer content.
	if kvgenesis, _ := db.Get(headerHashKey(0)); len(kvgenesis) > 0 {
		if frozen, _ := frdb.Ancients(); frozen > 0 {
			// If the freezer already contains something, ensure that the genesis blocks
			// match, otherwise we might mix up freezers across chains and destroy both
			// the freezer and the key-value store.
			frgenesis, err := frdb.Ancient(ChainFreezerHashTable, 0)
			if err != nil {
				printChainMetadata(db)
				return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err)
			} else if !bytes.Equal(kvgenesis, frgenesis) {
				printChainMetadata(db)
				return nil, fmt.Errorf("genesis mismatch: %#x (leveldb) != %#x (ancients)", kvgenesis, frgenesis)
			}
			// Key-value store and freezer belong to the same network. Ensure that they
			// are contiguous, otherwise we might end up with a non-functional freezer.
			if kvhash, _ := db.Get(headerHashKey(frozen)); len(kvhash) == 0 {
				// The subsequent header after the freezer limit is missing from the
				// database. Reject startup if the database has a more recent head.
				if head := *ReadHeaderNumber(db, ReadHeadHeaderHash(db)); head > frozen-1 {
					// Find the smallest block stored in the key-value store
					// in the range [frozen, head]
					var number uint64
					for number = frozen; number <= head; number++ {
						if present, _ := db.Has(headerHashKey(number)); present {
							break
						}
					}
					// We are about to exit on error. Print database metadata before exiting
					printChainMetadata(db)
					return nil, fmt.Errorf("gap in the chain between ancients [0 - #%d] and leveldb [#%d - #%d] ",
						frozen-1, number, head)
				}
				// Database contains only older data than the freezer, this happens if the
				// state was wiped and reinited from an existing freezer.
			}
			// Otherwise, key-value store continues where the freezer left off, all is fine.
			// We might have duplicate blocks (crash after freezer write but before key-value
			// store deletion), but that's fine.
		} else {
			// If the freezer is empty, ensure nothing was moved yet from the key-value
			// store, otherwise we'll end up missing data. We check block #1 to decide
			// if we froze anything previously or not, but do take care of databases with
			// only the genesis block.
			if ReadHeadHeaderHash(db) != common.BytesToHash(kvgenesis) {
				// Key-value store contains more data than the genesis block, make sure we
				// didn't freeze anything yet.
				if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 {
					printChainMetadata(db)
					return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
				}
				// Block #1 is still in the database, we're allowed to init a new freezer
			}
			// Otherwise, the head header is still the genesis, we're allowed to init a new
			// freezer.
		}
	}
	// Freezer is consistent with the key-value database, permit combining the two
	if !readonly {
		frdb.wg.Add(1)
		go func() {
			frdb.freeze(db)
			frdb.wg.Done()
		}()
	}
	return &freezerdb{
		ancientRoot:   ancient,
		KeyValueStore: db,
		chainFreezer:  frdb,
	}, nil
}
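
// A minimal, hypothetical open sequence using only identifiers from this file:
// combining an in-memory key-value store with the in-memory chain freezer that
// an empty ancient path selects (e.g. dev mode).
//
//	db, err := NewDatabaseWithFreezer(memorydb.New(), "", "", false)
//	if err != nil {
//		log.Crit("Failed to open database", "err", err)
//	}
//	defer db.Close()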

// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
// freezer moving immutable chain segments into cold storage.
func NewMemoryDatabase() ethdb.Database {
	return NewDatabase(memorydb.New())
}
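
// Typical use is in tests and one-off tools that need a throwaway chain
// database; a minimal sketch:
//
//	db := NewMemoryDatabase()
//	defer db.Close()
//	if err := db.Put([]byte("key"), []byte("value")); err != nil {
//		// handle error
//	}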

const (
	DBPebble  = "pebble"
	DBLeveldb = "leveldb"
)

// PreexistingDatabase checks whether a database is already instantiated at the
// given data directory, and if so, returns the type of the database (or the
// empty string otherwise).
func PreexistingDatabase(path string) string {
	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
		return "" // No pre-existing db
	}
	if matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*")); len(matches) > 0 || err != nil {
		if err != nil {
			panic(err) // only possible if the pattern is malformed
		}
		return DBPebble
	}
	return DBLeveldb
}
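
// The heuristic above relies on both backends writing a CURRENT file, while
// OPTIONS-* files are only produced by Pebble. A hypothetical caller choosing
// a backend (datadir is an assumed variable):
//
//	switch PreexistingDatabase(filepath.Join(datadir, "chaindata")) {
//	case DBPebble:
//		// reopen with the pebble backend
//	case DBLeveldb:
//		// reopen with the leveldb backend
//	default:
//		// no database yet, use the configured default
//	}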

type counter uint64

func (c counter) String() string {
	return fmt.Sprintf("%d", c)
}

func (c counter) Percentage(current uint64) string {
	return fmt.Sprintf("%d", current*100/uint64(c))
}

// stat stores sizes and count for a parameter
type stat struct {
	size  common.StorageSize
	count counter
}

// Add size to the stat and increase the counter by 1
func (s *stat) Add(size common.StorageSize) {
	s.size += size
	s.count++
}

func (s *stat) Size() string {
	return s.size.String()
}

func (s *stat) Count() string {
	return s.count.String()
}

// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
	it := db.NewIterator(keyPrefix, keyStart)
	defer it.Release()

	var (
		count  int64
		start  = time.Now()
		logged = time.Now()

		// Key-value store statistics
		headers            stat
		bodies             stat
		receipts           stat
		tds                stat
		numHashPairings    stat
		hashNumPairings    stat
		legacyTries        stat
		stateLookups       stat
		accountTries       stat
		storageTries       stat
		codes              stat
		txLookups          stat
		accountSnaps       stat
		storageSnaps       stat
		preimages          stat
		beaconHeaders      stat
		cliqueSnaps        stat
		bloomBits          stat
		filterMapRows      stat
		filterMapLastBlock stat
		filterMapBlockLV   stat

		// Verkle statistics
		verkleTries        stat
		verkleStateLookups stat

		// LES statistics
		chtTrieNodes   stat
		bloomTrieNodes stat

		// Meta- and unaccounted data
		metadata    stat
		unaccounted stat

		// Totals
		total common.StorageSize

		// This map tracks example keys for unaccounted data.
		// For each unique two-byte prefix, the first unaccounted key encountered
		// by the iterator will be stored.
		unaccountedKeys = make(map[[2]byte][]byte)
	)
	// Inspect key-value database first.
	for it.Next() {
		var (
			key  = it.Key()
			size = common.StorageSize(len(key) + len(it.Value()))
		)
		total += size
		switch {
		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
			headers.Add(size)
		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
			bodies.Add(size)
		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
			receipts.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
			tds.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
			numHashPairings.Add(size)
		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
			hashNumPairings.Add(size)
		case IsLegacyTrieNode(key, it.Value()):
			legacyTries.Add(size)
		case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
			stateLookups.Add(size)
		case IsAccountTrieNode(key):
			accountTries.Add(size)
		case IsStorageTrieNode(key):
			storageTries.Add(size)
		case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
			codes.Add(size)
		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
			txLookups.Add(size)
		case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
			accountSnaps.Add(size)
		case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
			storageSnaps.Add(size)
		case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
			preimages.Add(size)
		case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
			beaconHeaders.Add(size)
		case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
			cliqueSnaps.Add(size)
		// new log index
		case bytes.HasPrefix(key, filterMapRowPrefix) && len(key) <= len(filterMapRowPrefix)+9:
			filterMapRows.Add(size)
		case bytes.HasPrefix(key, filterMapLastBlockPrefix) && len(key) == len(filterMapLastBlockPrefix)+4:
			filterMapLastBlock.Add(size)
		case bytes.HasPrefix(key, filterMapBlockLVPrefix) && len(key) == len(filterMapBlockLVPrefix)+8:
			filterMapBlockLV.Add(size)
		// old log index (deprecated)
		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, bloomBitsMetaPrefix) && len(key) < len(bloomBitsMetaPrefix)+8:
			bloomBits.Add(size)
		// LES indexes (deprecated)
		case bytes.HasPrefix(key, chtTablePrefix) ||
			bytes.HasPrefix(key, chtIndexTablePrefix) ||
			bytes.HasPrefix(key, chtPrefix): // Canonical hash trie
			chtTrieNodes.Add(size)
		case bytes.HasPrefix(key, bloomTrieTablePrefix) ||
			bytes.HasPrefix(key, bloomTrieIndexPrefix) ||
			bytes.HasPrefix(key, bloomTriePrefix): // Bloomtrie sub
			bloomTrieNodes.Add(size)
		// Verkle trie data is detected, determine the sub-category
		case bytes.HasPrefix(key, VerklePrefix):
			remain := key[len(VerklePrefix):]
			switch {
			case IsAccountTrieNode(remain):
				verkleTries.Add(size)
			case bytes.HasPrefix(remain, stateIDPrefix) && len(remain) == len(stateIDPrefix)+common.HashLength:
				verkleStateLookups.Add(size)
			case bytes.Equal(remain, persistentStateIDKey):
				metadata.Add(size)
			case bytes.Equal(remain, trieJournalKey):
				metadata.Add(size)
			case bytes.Equal(remain, snapSyncStatusFlagKey):
				metadata.Add(size)
			default:
				unaccounted.Add(size)
			}
		// Metadata keys
		case slices.ContainsFunc(knownMetadataKeys, func(x []byte) bool { return bytes.Equal(x, key) }):
			metadata.Add(size)
		default:
			unaccounted.Add(size)
			if len(key) >= 2 {
				prefix := [2]byte(key[:2])
				if _, ok := unaccountedKeys[prefix]; !ok {
					unaccountedKeys[prefix] = bytes.Clone(key)
				}
			}
		}
		count++
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
	}
	// Display the database statistics of the key-value store.
	stats := [][]string{
		{"Key-Value store", "Headers", headers.Size(), headers.Count()},
		{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
		{"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
		{"Key-Value store", "Difficulties (deprecated)", tds.Size(), tds.Count()},
		{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
		{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
		{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
		{"Key-Value store", "Log index filter-map rows", filterMapRows.Size(), filterMapRows.Count()},
		{"Key-Value store", "Log index last-block-of-map", filterMapLastBlock.Size(), filterMapLastBlock.Count()},
		{"Key-Value store", "Log index block-lv", filterMapBlockLV.Size(), filterMapBlockLV.Count()},
		{"Key-Value store", "Log bloombits (deprecated)", bloomBits.Size(), bloomBits.Count()},
		{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
		{"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()},
		{"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()},
		{"Key-Value store", "Path trie account nodes", accountTries.Size(), accountTries.Count()},
		{"Key-Value store", "Path trie storage nodes", storageTries.Size(), storageTries.Count()},
		{"Key-Value store", "Verkle trie nodes", verkleTries.Size(), verkleTries.Count()},
		{"Key-Value store", "Verkle trie state lookups", verkleStateLookups.Size(), verkleStateLookups.Count()},
		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
		{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
		{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
		{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
		{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
		{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
	}
	// Inspect all registered append-only file stores next.
	ancients, err := inspectFreezers(db)
	if err != nil {
		return err
	}
	for _, ancient := range ancients {
		for _, table := range ancient.sizes {
			stats = append(stats, []string{
				fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
				strings.Title(table.name),
				table.size.String(),
				fmt.Sprintf("%d", ancient.count()),
			})
		}
		total += ancient.size()
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Database", "Category", "Size", "Items"})
	table.SetFooter([]string{"", "Total", total.String(), " "})
	table.AppendBulk(stats)
	table.Render()

	if unaccounted.size > 0 {
		log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
		for _, e := range slices.SortedFunc(maps.Values(unaccountedKeys), bytes.Compare) {
			log.Error(fmt.Sprintf(" example key: %x", e))
		}
	}
	return nil
}
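
// A hypothetical invocation scanning the full key space (nil prefix and start
// begin iteration at the very first key), similar to how the `geth db inspect`
// command drives this function in upstream go-ethereum:
//
//	if err := InspectDatabase(db, nil, nil); err != nil {
//		log.Error("Database inspection failed", "err", err)
//	}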

// This is the list of known 'metadata' keys stored in the database.
var knownMetadataKeys = [][]byte{
	databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
	lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
	snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
	uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
	persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
	filterMapsRangeKey,
}

// printChainMetadata prints out chain metadata to stderr.
func printChainMetadata(db ethdb.KeyValueStore) {
	fmt.Fprintf(os.Stderr, "Chain metadata\n")
	for _, v := range ReadChainMetadata(db) {
		fmt.Fprintf(os.Stderr, " %s\n", strings.Join(v, ": "))
	}
	fmt.Fprintf(os.Stderr, "\n\n")
}

// ReadChainMetadata returns a set of key/value pairs that contains information
// about the database chain status. This can be used for diagnostic purposes
// when investigating the state of the node.
func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
	pp := func(val *uint64) string {
		if val == nil {
			return "<nil>"
		}
		return fmt.Sprintf("%d (%#x)", *val, *val)
	}
	data := [][]string{
		{"databaseVersion", pp(ReadDatabaseVersion(db))},
		{"headBlockHash", fmt.Sprintf("%v", ReadHeadBlockHash(db))},
		{"headFastBlockHash", fmt.Sprintf("%v", ReadHeadFastBlockHash(db))},
		{"headHeaderHash", fmt.Sprintf("%v", ReadHeadHeaderHash(db))},
		{"lastPivotNumber", pp(ReadLastPivotNumber(db))},
		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(ReadSnapshotSyncStatus(db)))},
		{"snapshotDisabled", fmt.Sprintf("%v", ReadSnapshotDisabled(db))},
		{"snapshotJournal", fmt.Sprintf("%d bytes", len(ReadSnapshotJournal(db)))},
		{"snapshotRecoveryNumber", pp(ReadSnapshotRecoveryNumber(db))},
		{"snapshotRoot", fmt.Sprintf("%v", ReadSnapshotRoot(db))},
		{"txIndexTail", pp(ReadTxIndexTail(db))},
	}
	if b := ReadSkeletonSyncStatus(db); b != nil {
		data = append(data, []string{"SkeletonSyncStatus", string(b)})
	}
	if fmr, ok, _ := ReadFilterMapsRange(db); ok {
		data = append(data, []string{"filterMapsRange", fmt.Sprintf("%+v", fmr)})
	}
	return data
}
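
// A hypothetical diagnostic dump built on ReadChainMetadata (sketch only):
//
//	for _, kv := range ReadChainMetadata(db) {
//		fmt.Printf("%-24s %s\n", kv[0], kv[1])
//	}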