author | Péter Szilágyi <peterke@gmail.com> | 2015-12-28 21:20:37 +0800 |
---|---|---|
committer | Péter Szilágyi <peterke@gmail.com> | 2016-02-16 18:21:08 +0800 |
commit | 7e29b0b5b4e5cf7ded9a5a75789de6f8121caec9 (patch) | |
tree | b1470ba733e04490fb797edce652a3cca802972e /trie/sync_test.go | |
parent | 4f28c5b69d652e12adf8a88f526f459a492e159e (diff) | |
download | dexon-7e29b0b5b4e5cf7ded9a5a75789de6f8121caec9.tar.gz dexon-7e29b0b5b4e5cf7ded9a5a75789de6f8121caec9.tar.zst dexon-7e29b0b5b4e5cf7ded9a5a75789de6f8121caec9.zip |
core/state, trie: add node iterator, test state/trie sync consistency
Diffstat (limited to 'trie/sync_test.go')
-rw-r--r-- | trie/sync_test.go | 102 |
1 file changed, 97 insertions, 5 deletions
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 9c036a3a9..94d6edd68 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -18,6 +18,7 @@ package trie
 
 import (
 	"bytes"
+	"fmt"
 	"testing"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -33,6 +34,7 @@ func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) {
 	// Fill it with some arbitrary data
 	content := make(map[string][]byte)
 	for i := byte(0); i < 255; i++ {
+		// Map the same data under multiple keys
 		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
 		content[string(key)] = val
 		trie.Update(key, val)
@@ -40,9 +42,19 @@
 		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
 		content[string(key)] = val
 		trie.Update(key, val)
+
+		// Add some other data to inflate the trie
+		for j := byte(3); j < 13; j++ {
+			key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
+			content[string(key)] = val
+			trie.Update(key, val)
+		}
 	}
 	trie.Commit()
 
+	// Remove any potentially cached data from the test trie creation
+	globalCache.Clear()
+
 	// Return the generated trie
 	return db, trie, content
 }
@@ -50,10 +62,17 @@ func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) {
 // checkTrieContents cross references a reconstructed trie with an expected data
 // content map.
 func checkTrieContents(t *testing.T, db Database, root []byte, content map[string][]byte) {
+	// Remove any potentially cached data from the trie synchronisation
+	globalCache.Clear()
+
+	// Check root availability and trie contents
 	trie, err := New(common.BytesToHash(root), db)
 	if err != nil {
 		t.Fatalf("failed to create trie at %x: %v", root, err)
 	}
+	if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
+		t.Fatalf("inconsistent trie at %x: %v", root, err)
+	}
 	for key, val := range content {
 		if have := trie.Get([]byte(key)); bytes.Compare(have, val) != 0 {
 			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
@@ -61,6 +80,28 @@ func checkTrieContents(t *testing.T, db Database, root []byte, content map[strin
 	}
 }
 
+// checkTrieConsistency checks that all nodes in a trie are indeed present.
+func checkTrieConsistency(db Database, root common.Hash) (failure error) {
+	// Capture any panics by the iterator
+	defer func() {
+		if r := recover(); r != nil {
+			failure = fmt.Errorf("%v", r)
+		}
+	}()
+	// Remove any potentially cached data from the test trie creation or previous checks
+	globalCache.Clear()
+
+	// Create and iterate a trie rooted in a subnode
+	trie, err := New(root, db)
+	if err != nil {
+		return
+	}
+	it := NewNodeIterator(trie)
+	for it.Next() {
+	}
+	return nil
+}
+
 // Tests that an empty trie is not scheduled for syncing.
 func TestEmptyTrieSync(t *testing.T) {
 	emptyA, _ := New(common.Hash{}, nil)
@@ -102,7 +143,7 @@ func testIterativeTrieSync(t *testing.T, batch int) {
 		}
 		queue = append(queue[:0], sched.Missing(batch)...)
 	}
-	// Cross check that the two tries re in sync
+	// Cross check that the two tries are in sync
 	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
 }
 
@@ -132,7 +173,7 @@ func TestIterativeDelayedTrieSync(t *testing.T) {
 		}
 		queue = append(queue[len(results):], sched.Missing(10000)...)
 	}
-	// Cross check that the two tries re in sync
+	// Cross check that the two tries are in sync
 	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
 }
 
@@ -173,7 +214,7 @@ func testIterativeRandomTrieSync(t *testing.T, batch int) {
 			queue[hash] = struct{}{}
 		}
 	}
-	// Cross check that the two tries re in sync
+	// Cross check that the two tries are in sync
 	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
 }
 
@@ -216,7 +257,7 @@ func TestIterativeRandomDelayedTrieSync(t *testing.T) {
 			queue[hash] = struct{}{}
 		}
 	}
-	// Cross check that the two tries re in sync
+	// Cross check that the two tries are in sync
 	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
 }
 
@@ -252,6 +293,57 @@ func TestDuplicateAvoidanceTrieSync(t *testing.T) {
 		}
 		queue = append(queue[:0], sched.Missing(0)...)
 	}
-	// Cross check that the two tries re in sync
+	// Cross check that the two tries are in sync
 	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
 }
+
+// Tests that at any point in time during a sync, only complete sub-tries are in
+// the database.
+func TestIncompleteTrieSync(t *testing.T) {
+	// Create a random trie to copy
+	srcDb, srcTrie, _ := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	added := []common.Hash{}
+	queue := append([]common.Hash{}, sched.Missing(1)...)
+	for len(queue) > 0 {
+		// Fetch a batch of trie nodes
+		results := make([]SyncResult, len(queue))
+		for i, hash := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results[i] = SyncResult{hash, data}
+		}
+		// Process each of the trie nodes
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		for _, result := range results {
+			added = append(added, result.Hash)
+		}
+		// Check that all known sub-tries in the synced trie are complete
+		for _, root := range added {
+			if err := checkTrieConsistency(dstDb, root); err != nil {
+				t.Fatalf("trie inconsistent: %v", err)
+			}
+		}
+		// Fetch the next batch to retrieve
+		queue = append(queue[:0], sched.Missing(1)...)
+	}
+	// Sanity check that removing any node from the database is detected
+	for _, node := range added[1:] {
+		key := node.Bytes()
+		value, _ := dstDb.Get(key)
+
+		dstDb.Delete(key)
+		if err := checkTrieConsistency(dstDb, added[0]); err == nil {
+			t.Fatalf("trie inconsistency not caught, missing: %x", key)
+		}
+		dstDb.Put(key, value)
+	}
+}
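
For readers unfamiliar with the node iterator this commit exercises, the sketch below shows the usage pattern behind the new checkTrieConsistency helper: build and commit a small trie, then walk every node with NewNodeIterator until Next returns false. It is a minimal, illustrative sketch only, assuming the same package-level API that appears in the diff (New, Update, Commit, NewNodeIterator, ethdb.NewMemDatabase); the test name and the node-counting assertion are invented for the example and are not part of the commit.

package trie

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
)

// TestNodeIteratorSketch (hypothetical) walks a freshly committed trie with the
// node iterator and checks that the traversal visits at least one node. A missing
// database entry would make Next panic, which checkTrieConsistency above turns
// into an error via recover.
func TestNodeIteratorSketch(t *testing.T) {
	db, _ := ethdb.NewMemDatabase()
	trie, _ := New(common.Hash{}, db)

	// Insert a handful of entries so the trie contains a few internal nodes.
	for i := byte(0); i < 8; i++ {
		trie.Update(common.LeftPadBytes([]byte{1, i}, 32), []byte{i})
	}
	trie.Commit()

	// Walk every node reachable from the root.
	nodes := 0
	for it := NewNodeIterator(trie); it.Next(); {
		nodes++
	}
	if nodes == 0 {
		t.Fatalf("node iterator visited no nodes")
	}
}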