From 345332906c213203b90b0d59c9a1127e46d8ca85 Mon Sep 17 00:00:00 2001
From: Nick Johnson
Date: Mon, 10 Jul 2017 15:48:42 +0100
Subject: cmd: Added support for copying data to another DB instance

---
 cmd/geth/chaincmd.go | 196 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 cmd/geth/main.go     |   1 +
 2 files changed, 196 insertions(+), 1 deletion(-)

diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 12bc1d7c6..f9fd8c013 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -19,6 +19,7 @@ package main
 import (
 	"encoding/json"
 	"fmt"
+	"math/big"
 	"os"
 	"runtime"
 	"strconv"
@@ -31,7 +32,9 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/syndtr/goleveldb/leveldb/util"
@@ -71,7 +74,7 @@ It expects the genesis file as argument.`,
 The import command imports blocks from an RLP-encoded form. The form can be one file
 with several RLP-encoded blocks, or several files can be used.
 
-If only one file is used, import error will result in failure. If several files are used, 
+If only one file is used, import error will result in failure. If several files are used,
 processing will proceed even if an individual RLP-file import failure occurs.`,
 	}
 	exportCommand = cli.Command{
@@ -90,6 +93,21 @@ Requires a first argument of the file to write to.
 Optional second and third arguments control the first and
 last block to write. In this mode, the file will be appended
 if already existing.`,
+	}
+	copydbCommand = cli.Command{
+		Action:    utils.MigrateFlags(copyDb),
+		Name:      "copydb",
+		Usage:     "Copy from one chain DB into another using the downloader",
+		ArgsUsage: "",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.CacheFlag,
+			utils.SyncModeFlag,
+			utils.FakePoWFlag,
+		},
+		Category: "BLOCKCHAIN COMMANDS",
+		Description: `
+The first argument must be the directory containing the blockchain to download from`,
 	}
 	removedbCommand = cli.Command{
 		Action:    utils.MigrateFlags(removeDB),
@@ -268,6 +286,182 @@ func exportChain(ctx *cli.Context) error {
 	return nil
 }
 
+type localPeer struct {
+	chainDb ethdb.Database
+	hc      *core.HeaderChain
+	dl      *downloader.Downloader
+}
+
+func (lp *localPeer) Head() (common.Hash, *big.Int) {
+	header := lp.hc.CurrentHeader()
+	return header.Hash(), header.Number
+}
+
+func (lp *localPeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
+	var (
+		headers []*types.Header
+		unknown bool
+	)
+
+	for !unknown && len(headers) < amount {
+		origin := lp.hc.GetHeaderByHash(hash)
+		if origin == nil {
+			break
+		}
+
+		number := origin.Number.Uint64()
+		headers = append(headers, origin)
+		if reverse {
+			for i := 0; i < int(skip)+1; i++ {
+				if header := lp.hc.GetHeader(hash, number); header != nil {
+					hash = header.ParentHash
+					number--
+				} else {
+					unknown = true
+					break
+				}
+			}
+		} else {
+			var (
+				current = origin.Number.Uint64()
+				next    = current + uint64(skip) + 1
+			)
+			if header := lp.hc.GetHeaderByNumber(next); header != nil {
+				if lp.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash {
+					hash = header.Hash()
+				} else {
+					unknown = true
+				}
+			} else {
+				unknown = true
+			}
+		}
+	}
+
+	lp.dl.DeliverHeaders("local", headers)
+	return nil
+}
+
+func (lp *localPeer) RequestHeadersByNumber(num uint64, amount int, skip int, reverse bool) error {
+	var (
+		headers []*types.Header
+		unknown bool
+	)
+
+	for !unknown && len(headers) < amount {
+		origin := lp.hc.GetHeaderByNumber(num)
+		if origin == nil {
+			break
+		}
+
+		if reverse {
+			if num >= uint64(skip+1) {
+				num -= uint64(skip + 1)
+			} else {
+				unknown = true
+			}
+		} else {
+			num += uint64(skip + 1)
+		}
+		headers = append(headers, origin)
+	}
+
+	lp.dl.DeliverHeaders("local", headers)
+	return nil
+}
+
+func (lp *localPeer) RequestBodies(hashes []common.Hash) error {
+	var (
+		transactions [][]*types.Transaction
+		uncles       [][]*types.Header
+	)
+
+	for _, hash := range hashes {
+		block := core.GetBlock(lp.chainDb, hash, lp.hc.GetBlockNumber(hash))
+		transactions = append(transactions, block.Transactions())
+		uncles = append(uncles, block.Uncles())
+	}
+
+	lp.dl.DeliverBodies("local", transactions, uncles)
+	return nil
+}
+
+func (lp *localPeer) RequestReceipts(hashes []common.Hash) error {
+	var receipts [][]*types.Receipt
+
+	for _, hash := range hashes {
+		receipts = append(receipts, core.GetBlockReceipts(lp.chainDb, hash, lp.hc.GetBlockNumber(hash)))
+	}
+
+	lp.dl.DeliverReceipts("local", receipts)
+	return nil
+}
+
+func (lp *localPeer) RequestNodeData(hashes []common.Hash) error {
+	var data [][]byte
+
+	for _, hash := range hashes {
+		if entry, err := lp.chainDb.Get(hash.Bytes()); err == nil {
+			data = append(data, entry)
+		}
+	}
+
+	lp.dl.DeliverNodeData("local", data)
+	return nil
+}
+
+func copyDb(ctx *cli.Context) error {
+	if len(ctx.Args()) != 1 {
+		utils.Fatalf("This command requires an argument.")
+	}
+
+	stack := makeFullNode(ctx)
+	chain, chainDb := utils.MakeChain(ctx, stack)
+	start := time.Now()
+
+	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
+	mux := new(event.TypeMux)
+	dl := downloader.New(syncmode, chainDb, mux, chain, nil, nil)
+
+	var err error
+	filename := ctx.Args().First()
+	cache := ctx.GlobalInt(utils.CacheFlag.Name)
+	handles := 256
+	localdb, err := ethdb.NewLDBDatabase(filename, cache, handles)
+	if err != nil {
+		return err
+	}
+
+	hc, err := core.NewHeaderChain(localdb, chain.Config(), chain.Engine(), func() bool { return false })
+	if err != nil {
+		return err
+	}
+
+	peer := &localPeer{localdb, hc, dl}
+	if err := dl.RegisterPeer("local", 63, peer); err != nil {
+		return err
+	}
+
+	currentHeader := hc.CurrentHeader()
+	if err := dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
+		return err
+	}
+	for dl.Synchronising() {
+		time.Sleep(10 * time.Millisecond)
+	}
+
+	fmt.Printf("Database copy done in %v", time.Since(start))
+
+	start = time.Now()
+	fmt.Println("Compacting entire database...")
+	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
+		utils.Fatalf("Compaction failed: %v", err)
+	}
+	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
+
+	return nil
+}
+
 func removeDB(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 8166c9ce8..88f3528f3 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -146,6 +146,7 @@ func init() {
 		initCommand,
 		importCommand,
 		exportCommand,
+		copydbCommand,
 		removedbCommand,
 		dumpCommand,
 		// See monitorcmd.go:
-- cgit

From 35767dfd0c468872acf99c3fccc3a206ce709128 Mon Sep 17 00:00:00 2001
From: Péter Szilágyi
Date: Tue, 10 Oct 2017 15:51:09 +0300
Subject: cmd, eth: separate out FakePeer for future reuse

---
+++++---------------------------------------- cmd/utils/flags.go | 21 ++++-- eth/downloader/fakepeer.go | 160 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 192 insertions(+), 152 deletions(-) create mode 100644 eth/downloader/fakepeer.go diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index f9fd8c013..4a9a7b11b 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -19,7 +19,6 @@ package main import ( "encoding/json" "fmt" - "math/big" "os" "runtime" "strconv" @@ -97,13 +96,15 @@ if already existing.`, copydbCommand = cli.Command{ Action: utils.MigrateFlags(copyDb), Name: "copydb", - Usage: "Copy from one chain DB into another using the downloader", - ArgsUsage: "", + Usage: "Create a local chain from a target chaindata folder", + ArgsUsage: "", Flags: []cli.Flag{ utils.DataDirFlag, utils.CacheFlag, utils.SyncModeFlag, utils.FakePoWFlag, + utils.TestnetFlag, + utils.RinkebyFlag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` @@ -286,172 +287,44 @@ func exportChain(ctx *cli.Context) error { return nil } -type localPeer struct { - chainDb ethdb.Database - hc *core.HeaderChain - dl *downloader.Downloader -} - -func (lp *localPeer) Head() (common.Hash, *big.Int) { - header := lp.hc.CurrentHeader() - return header.Hash(), header.Number -} - -func (lp *localPeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error { - var ( - headers []*types.Header - unknown bool - ) - - for !unknown && len(headers) < amount { - origin := lp.hc.GetHeaderByHash(hash) - if origin == nil { - break - } - - number := origin.Number.Uint64() - headers = append(headers, origin) - if reverse { - for i := 0; i < int(skip)+1; i++ { - if header := lp.hc.GetHeader(hash, number); header != nil { - hash = header.ParentHash - number-- - } else { - unknown = true - break - } - } - } else { - var ( - current = origin.Number.Uint64() - next = current + uint64(skip) + 1 - ) - if header := lp.hc.GetHeaderByNumber(next); header != nil { - if lp.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash { - hash = header.Hash() - } else { - unknown = true - } - } else { - unknown = true - } - } - } - - lp.dl.DeliverHeaders("local", headers) - return nil -} - -func (lp *localPeer) RequestHeadersByNumber(num uint64, amount int, skip int, reverse bool) error { - var ( - headers []*types.Header - unknown bool - ) - - for !unknown && len(headers) < amount { - origin := lp.hc.GetHeaderByNumber(num) - if origin == nil { - break - } - - if reverse { - if num >= uint64(skip+1) { - num -= uint64(skip + 1) - } else { - unknown = true - } - } else { - num += uint64(skip + 1) - } - headers = append(headers, origin) - } - - lp.dl.DeliverHeaders("local", headers) - return nil -} - -func (lp *localPeer) RequestBodies(hashes []common.Hash) error { - var ( - transactions [][]*types.Transaction - uncles [][]*types.Header - ) - - for _, hash := range hashes { - block := core.GetBlock(lp.chainDb, hash, lp.hc.GetBlockNumber(hash)) - transactions = append(transactions, block.Transactions()) - uncles = append(uncles, block.Uncles()) - } - - lp.dl.DeliverBodies("local", transactions, uncles) - return nil -} - -func (lp *localPeer) RequestReceipts(hashes []common.Hash) error { - var receipts [][]*types.Receipt - - for _, hash := range hashes { - receipts = append(receipts, core.GetBlockReceipts(lp.chainDb, hash, lp.hc.GetBlockNumber(hash))) - } - - lp.dl.DeliverReceipts("local", receipts) - return nil -} - -func (lp *localPeer) RequestNodeData(hashes []common.Hash) error { - var 
-	var data [][]byte
-
-	for _, hash := range hashes {
-		if entry, err := lp.chainDb.Get(hash.Bytes()); err == nil {
-			data = append(data, entry)
-		}
-	}
-
-	lp.dl.DeliverNodeData("local", data)
-	return nil
-}
-
 func copyDb(ctx *cli.Context) error {
+	// Ensure we have a source chain directory to copy
 	if len(ctx.Args()) != 1 {
-		utils.Fatalf("This command requires an argument.")
+		utils.Fatalf("Source chaindata directory path argument missing")
 	}
-
+	// Initialize a new chain for the running node to sync into
 	stack := makeFullNode(ctx)
 	chain, chainDb := utils.MakeChain(ctx, stack)
-	start := time.Now()
 
 	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
-	mux := new(event.TypeMux)
-	dl := downloader.New(syncmode, chainDb, mux, chain, nil, nil)
+	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
 
-	var err error
-	filename := ctx.Args().First()
-	cache := ctx.GlobalInt(utils.CacheFlag.Name)
-	handles := 256
-	localdb, err := ethdb.NewLDBDatabase(filename, cache, handles)
+	// Create a source peer to satisfy downloader requests from
+	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
 	if err != nil {
 		return err
 	}
-
-	hc, err := core.NewHeaderChain(localdb, chain.Config(), chain.Engine(), func() bool { return false })
+	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
 	if err != nil {
 		return err
 	}
-
-	peer := &localPeer{localdb, hc, dl}
-	if err := dl.RegisterPeer("local", 63, peer); err != nil {
+	peer := downloader.NewFakePeer("local", db, hc, dl)
+	if err = dl.RegisterPeer("local", 63, peer); err != nil {
 		return err
 	}
+	// Synchronise with the simulated peer
+	start := time.Now()
 
 	currentHeader := hc.CurrentHeader()
-	if err := dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
+	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
 		return err
 	}
 	for dl.Synchronising() {
 		time.Sleep(10 * time.Millisecond)
 	}
+	fmt.Printf("Database copy done in %v\n", time.Since(start))
 
-	fmt.Printf("Database copy done in %v", time.Since(start))
-
+	// Compact the entire database to remove any sync overhead
 	start = time.Now()
 	fmt.Println("Compacting entire database...")
 	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index a62b4940b..bfef619f6 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -31,6 +31,8 @@ import (
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/keystore"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/consensus/clique"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/state"
@@ -1086,17 +1088,22 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
 	var err error
 	chainDb = MakeChainDatabase(ctx, stack)
 
-	engine := ethash.NewFaker()
-	if !ctx.GlobalBool(FakePoWFlag.Name) {
-		engine = ethash.New(
-			stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
-			stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
-		)
-	}
 	config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
 	if err != nil {
 		Fatalf("%v", err)
 	}
+	var engine consensus.Engine
+	if config.Clique != nil {
+		engine = clique.New(config.Clique, chainDb)
+	} else {
+		engine = ethash.NewFaker()
+		if !ctx.GlobalBool(FakePoWFlag.Name) {
+			engine = ethash.New(
+				stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
+				stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
+			)
+		}
+	}
 	vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
 	chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)
 	if err != nil {
diff --git a/eth/downloader/fakepeer.go b/eth/downloader/fakepeer.go
new file mode 100644
index 000000000..ebdb9c334
--- /dev/null
+++ b/eth/downloader/fakepeer.go
@@ -0,0 +1,160 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethdb"
+)
+
+// FakePeer is a mock downloader peer that operates on a local database instance
+// instead of being an actual live node. It's useful for testing and to implement
+// sync commands from an existing local database.
+type FakePeer struct {
+	id string
+	db ethdb.Database
+	hc *core.HeaderChain
+	dl *Downloader
+}
+
+// NewFakePeer creates a new mock downloader peer with the given data sources.
+func NewFakePeer(id string, db ethdb.Database, hc *core.HeaderChain, dl *Downloader) *FakePeer {
+	return &FakePeer{id: id, db: db, hc: hc, dl: dl}
+}
+
+// Head implements downloader.Peer, returning the current head hash and number
+// of the best known header.
+func (p *FakePeer) Head() (common.Hash, *big.Int) {
+	header := p.hc.CurrentHeader()
+	return header.Hash(), header.Number
+}
+
+// RequestHeadersByHash implements downloader.Peer, returning a batch of headers
+// defined by the origin hash and the associated query parameters.
+func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
+	var (
+		headers []*types.Header
+		unknown bool
+	)
+	for !unknown && len(headers) < amount {
+		origin := p.hc.GetHeaderByHash(hash)
+		if origin == nil {
+			break
+		}
+		number := origin.Number.Uint64()
+		headers = append(headers, origin)
+		if reverse {
+			for i := 0; i < int(skip)+1; i++ {
+				if header := p.hc.GetHeader(hash, number); header != nil {
+					hash = header.ParentHash
+					number--
+				} else {
+					unknown = true
+					break
+				}
+			}
+		} else {
+			var (
+				current = origin.Number.Uint64()
+				next    = current + uint64(skip) + 1
+			)
+			if header := p.hc.GetHeaderByNumber(next); header != nil {
+				if p.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash {
+					hash = header.Hash()
+				} else {
+					unknown = true
+				}
+			} else {
+				unknown = true
+			}
+		}
+	}
+	p.dl.DeliverHeaders(p.id, headers)
+	return nil
+}
+
+// RequestHeadersByNumber implements downloader.Peer, returning a batch of headers
+// defined by the origin number and the associated query parameters.
+func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error {
+	var (
+		headers []*types.Header
+		unknown bool
+	)
+	for !unknown && len(headers) < amount {
+		origin := p.hc.GetHeaderByNumber(number)
+		if origin == nil {
+			break
+		}
+		if reverse {
+			if number >= uint64(skip+1) {
+				number -= uint64(skip + 1)
+			} else {
+				unknown = true
+			}
+		} else {
+			number += uint64(skip + 1)
+		}
+		headers = append(headers, origin)
+	}
+	p.dl.DeliverHeaders(p.id, headers)
+	return nil
+}
+
+// RequestBodies implements downloader.Peer, returning a batch of block bodies
+// corresponding to the specified block hashes.
+func (p *FakePeer) RequestBodies(hashes []common.Hash) error {
+	var (
+		txs    [][]*types.Transaction
+		uncles [][]*types.Header
+	)
+	for _, hash := range hashes {
+		block := core.GetBlock(p.db, hash, p.hc.GetBlockNumber(hash))
+
+		txs = append(txs, block.Transactions())
+		uncles = append(uncles, block.Uncles())
+	}
+	p.dl.DeliverBodies(p.id, txs, uncles)
+	return nil
+}
+
+// RequestReceipts implements downloader.Peer, returning a batch of transaction
+// receipts corresponding to the specified block hashes.
+func (p *FakePeer) RequestReceipts(hashes []common.Hash) error {
+	var receipts [][]*types.Receipt
+	for _, hash := range hashes {
+		receipts = append(receipts, core.GetBlockReceipts(p.db, hash, p.hc.GetBlockNumber(hash)))
+	}
+	p.dl.DeliverReceipts(p.id, receipts)
+	return nil
+}
+
+// RequestNodeData implements downloader.Peer, returning a batch of state trie
+// nodes corresponding to the specified trie hashes.
+func (p *FakePeer) RequestNodeData(hashes []common.Hash) error {
+	var data [][]byte
+	for _, hash := range hashes {
+		if entry, err := p.db.Get(hash.Bytes()); err == nil {
+			data = append(data, entry)
+		}
+	}
+	p.dl.DeliverNodeData(p.id, data)
+	return nil
+}
-- cgit
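
Note: the snippet below is a sketch and not part of either commit above. It condenses the wiring that copyDb performs after flag parsing into a single helper, to make the intended reuse of downloader.FakePeer easier to see. All types and call signatures are taken from the diffs; the helper name syncFromLocalDB and its parameter list are illustrative only, and error handling simply mirrors the command.

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
)

// syncFromLocalDB opens a source chaindata folder, exposes it to the downloader
// as a simulated "local" peer and synchronises the given chain from it, the way
// the copydb command does. chain and chainDb are expected to come from
// utils.MakeChain, syncmode from the --syncmode flag and cacheMB from --cache.
func syncFromLocalDB(sourceDir string, cacheMB int, chain *core.BlockChain, chainDb ethdb.Database, syncmode downloader.SyncMode) error {
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Open the source database and wrap it in a header chain (256 file
	// handles, no header validation), mirroring copyDb.
	db, err := ethdb.NewLDBDatabase(sourceDir, cacheMB, 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	// Register the source as a fake peer speaking eth/63.
	if err := dl.RegisterPeer("local", 63, downloader.NewFakePeer("local", db, hc, dl)); err != nil {
		return err
	}
	// Sync against the source's current head, then wait for completion.
	head := hc.CurrentHeader()
	if err := dl.Synchronise("local", head.Hash(), hc.GetTd(head.Hash(), head.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	return nil
}

From the command line, the same path is exercised by running geth copydb with the source node's chaindata directory as the single argument, as described in the command's usage text above; database compaction and the fake-PoW and network flags stay as in the command itself.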