author    | Anton Evangelatov <anton.evangelatov@gmail.com> | 2018-08-14 22:03:56 +0800
committer | Balint Gabor <balint.g@gmail.com>               | 2018-08-14 22:03:56 +0800
commit    | 97887d98da703a31040bceee13bce9ee77fca673 (patch)
tree      | ffc9a8a6fba087b26d33c0ef6c939a19aaaad8c9
parent    | 8a040de60bd6b740ebe87cd8e1fe6bfdb6635d2f (diff)
swarm/network, swarm/storage: validate chunk size (#17397)
* swarm/network, swarm/storage: validate default chunk size
* swarm/bmt, swarm/network, swarm/storage: update BMT hash initialisation
* swarm/bmt: move segmentCount to tests
* swarm/chunk: change chunk.DefaultSize to be untyped const
* swarm/storage: add size validator
* swarm/storage: add chunk size validation to localstore
* swarm/storage: move validation from localstore to validator
* swarm/storage: global chunk rules in MRU
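The last four bullets are the heart of the change: chunk validation becomes a chain of per-type validators consulted by the local store. As a minimal, self-contained sketch of that rule (the `ChunkValidator` interface matches the one in `swarm/storage`; `sizeValidator`, `defaultSize`, and `put` are illustrative stand-ins, not code from this commit):

```go
package main

import "fmt"

// Address stands in for swarm/storage.Address; the concrete
// definitions here are illustrative only.
type Address []byte

// ChunkValidator mirrors the interface in swarm/storage.
type ChunkValidator interface {
	Validate(addr Address, data []byte) bool
}

const defaultSize = 4096 // as chunk.DefaultSize in this commit

// sizeValidator is a hypothetical stand-in for the size rules added in
// this PR: at least an 8-byte span prefix plus one byte of payload, and
// at most defaultSize bytes of payload after the prefix.
type sizeValidator struct{}

func (sizeValidator) Validate(addr Address, data []byte) bool {
	return len(data) >= 9 && len(data) <= defaultSize+8
}

// put mimics the LocalStore.Put logic in this diff: validators are
// tried in order, one per chunk type, and the chunk is accepted as
// soon as any one of them succeeds.
func put(validators []ChunkValidator, addr Address, data []byte) error {
	valid := true
	for _, v := range validators {
		if valid = v.Validate(addr, data); valid {
			break
		}
	}
	if !valid {
		return fmt.Errorf("invalid chunk %x, len %d", addr, len(data))
	}
	return nil // store the chunk
}

func main() {
	vs := []ChunkValidator{sizeValidator{}}
	fmt.Println(put(vs, Address{0x01}, make([]byte, 4104))) // <nil>
	fmt.Println(put(vs, Address{0x01}, make([]byte, 5000))) // error
}
```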
-rw-r--r-- | swarm/bmt/bmt.go                   |  5
-rw-r--r-- | swarm/bmt/bmt_test.go              | 19
-rw-r--r-- | swarm/chunk/chunk.go               |  5
-rw-r--r-- | swarm/network/stream/delivery.go   |  7
-rw-r--r-- | swarm/storage/chunker.go           |  9
-rw-r--r-- | swarm/storage/hasherstore.go       |  9
-rw-r--r-- | swarm/storage/ldbstore_test.go     | 11
-rw-r--r-- | swarm/storage/localstore.go        | 10
-rw-r--r-- | swarm/storage/localstore_test.go   |  8
-rw-r--r-- | swarm/storage/mru/handler.go       | 25
-rw-r--r-- | swarm/storage/mru/resource_test.go |  8
-rw-r--r-- | swarm/storage/mru/testutil.go      |  5
-rw-r--r-- | swarm/storage/mru/update.go        |  3
-rw-r--r-- | swarm/storage/pyramid.go           |  5
-rw-r--r-- | swarm/storage/types.go             | 13
-rw-r--r-- | swarm/swarm.go                     | 13
16 files changed, 79 insertions(+), 76 deletions(-)
diff --git a/swarm/bmt/bmt.go b/swarm/bmt/bmt.go
index 97e0e141e..a85d4369e 100644
--- a/swarm/bmt/bmt.go
+++ b/swarm/bmt/bmt.go
@@ -55,9 +55,6 @@ Two implementations are provided:
 */
 const (
-	// SegmentCount is the maximum number of segments of the underlying chunk
-	// Should be equal to max-chunk-data-size / hash-size
-	SegmentCount = 128
 	// PoolSize is the maximum number of bmt trees used by the hashers, i.e,
 	// the maximum number of concurrent BMT hashing operations performed by the same hasher
 	PoolSize = 8
@@ -318,7 +315,7 @@ func (h *Hasher) Sum(b []byte) (s []byte) {
 // with every full segment calls writeSection in a go routine
 func (h *Hasher) Write(b []byte) (int, error) {
 	l := len(b)
-	if l == 0 || l > 4096 {
+	if l == 0 || l > h.pool.Size {
 		return 0, nil
 	}
 	t := h.getTree()
diff --git a/swarm/bmt/bmt_test.go b/swarm/bmt/bmt_test.go
index 891d8cbb2..760aa11d8 100644
--- a/swarm/bmt/bmt_test.go
+++ b/swarm/bmt/bmt_test.go
@@ -34,6 +34,13 @@ import (
 // the actual data length generated (could be longer than max datalength of the BMT)
 const BufferSize = 4128

+const (
+	// segmentCount is the maximum number of segments of the underlying chunk
+	// Should be equal to max-chunk-data-size / hash-size
+	// Currently set to 128 == 4096 (default chunk size) / 32 (sha3.keccak256 size)
+	segmentCount = 128
+)
+
 var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65, 111, 127, 128}

 // calculates the Keccak256 SHA3 hash of the data
@@ -224,14 +231,14 @@ func TestHasherReuse(t *testing.T) {
 // tests if bmt reuse is not corrupting result
 func testHasherReuse(poolsize int, t *testing.T) {
 	hasher := sha3.NewKeccak256
-	pool := NewTreePool(hasher, SegmentCount, poolsize)
+	pool := NewTreePool(hasher, segmentCount, poolsize)
 	defer pool.Drain(0)
 	bmt := New(pool)

 	for i := 0; i < 100; i++ {
 		data := newData(BufferSize)
 		n := rand.Intn(bmt.Size())
-		err := testHasherCorrectness(bmt, hasher, data, n, SegmentCount)
+		err := testHasherCorrectness(bmt, hasher, data, n, segmentCount)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -241,7 +248,7 @@ func testHasherReuse(poolsize int, t *testing.T) {
 // Tests if pool can be cleanly reused even in concurrent use by several hasher
 func TestBMTConcurrentUse(t *testing.T) {
 	hasher := sha3.NewKeccak256
-	pool := NewTreePool(hasher, SegmentCount, PoolSize)
+	pool := NewTreePool(hasher, segmentCount, PoolSize)
 	defer pool.Drain(0)
 	cycles := 100
 	errc := make(chan error)
@@ -451,7 +458,7 @@ func benchmarkBMTBaseline(t *testing.B, n int) {
 func benchmarkBMT(t *testing.B, n int) {
 	data := newData(n)
 	hasher := sha3.NewKeccak256
-	pool := NewTreePool(hasher, SegmentCount, PoolSize)
+	pool := NewTreePool(hasher, segmentCount, PoolSize)
 	bmt := New(pool)

 	t.ReportAllocs()
@@ -465,7 +472,7 @@ func benchmarkBMT(t *testing.B, n int) {
 func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
 	data := newData(n)
 	hasher := sha3.NewKeccak256
-	pool := NewTreePool(hasher, SegmentCount, PoolSize)
+	pool := NewTreePool(hasher, segmentCount, PoolSize)
 	bmt := New(pool).NewAsyncWriter(double)
 	idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
 	shuffle(len(idxs), func(i int, j int) {
@@ -483,7 +490,7 @@ func benchmarkPool(t *testing.B, poolsize, n int) {
 	data := newData(n)
 	hasher := sha3.NewKeccak256
-	pool := NewTreePool(hasher, SegmentCount, poolsize)
+	pool := NewTreePool(hasher, segmentCount, poolsize)
 	cycles := 100
 	t.ReportAllocs()
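With `SegmentCount` gone from the `bmt` package API, callers now derive the segment count from the chunk size and the hash length. A sketch of the new initialisation, mirroring the `MakeHashFunc` hunk in `swarm/storage/types.go` further down (import paths assume this commit's tree):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/swarm/bmt"
	"github.com/ethereum/go-ethereum/swarm/chunk"
)

func main() {
	// SegmentCount is no longer exported from swarm/bmt; callers
	// compute it from the default chunk size and the hash size.
	hasher := sha3.NewKeccak256
	hasherSize := hasher().Size()                  // 32 for Keccak256
	segmentCount := chunk.DefaultSize / hasherSize // 4096 / 32 == 128

	pool := bmt.NewTreePool(hasher, segmentCount, bmt.PoolSize)
	defer pool.Drain(0)
	h := bmt.New(pool)
	fmt.Println(h.Size()) // hash output size in bytes
}
```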
diff --git a/swarm/chunk/chunk.go b/swarm/chunk/chunk.go
new file mode 100644
index 000000000..1449efccd
--- /dev/null
+++ b/swarm/chunk/chunk.go
@@ -0,0 +1,5 @@
+package chunk
+
+const (
+	DefaultSize = 4096
+)
diff --git a/swarm/network/stream/delivery.go b/swarm/network/stream/delivery.go
index fa210e300..36040339d 100644
--- a/swarm/network/stream/delivery.go
+++ b/swarm/network/stream/delivery.go
@@ -24,6 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/p2p/discover"
+	cp "github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/network"
 	"github.com/ethereum/go-ethereum/swarm/spancontext"
@@ -229,6 +230,11 @@ R:
 	for req := range d.receiveC {
 		processReceivedChunksCount.Inc(1)

+		if len(req.SData) > cp.DefaultSize+8 {
+			log.Warn("received chunk is bigger than expected", "len", len(req.SData))
+			continue R
+		}
+
 		// this should be has locally
 		chunk, err := d.db.Get(context.TODO(), req.Addr)
 		if err == nil {
@@ -244,6 +250,7 @@
 			continue R
 		default:
 		}
+
 		chunk.SData = req.SData
 		d.db.Put(context.TODO(), chunk)
diff --git a/swarm/storage/chunker.go b/swarm/storage/chunker.go
index b9b502273..6d805b8e2 100644
--- a/swarm/storage/chunker.go
+++ b/swarm/storage/chunker.go
@@ -25,6 +25,7 @@ import (
 	"time"

 	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/spancontext"
 	opentracing "github.com/opentracing/opentracing-go"
@@ -69,10 +70,6 @@ var (
 	errOperationTimedOut = errors.New("operation timed out")
 )

-const (
-	DefaultChunkSize int64 = 4096
-)
-
 type ChunkerParams struct {
 	chunkSize int64
 	hashSize  int64
@@ -136,7 +133,7 @@ type TreeChunker struct {
 func TreeJoin(ctx context.Context, addr Address, getter Getter, depth int) *LazyChunkReader {
 	jp := &JoinerParams{
 		ChunkerParams: ChunkerParams{
-			chunkSize: DefaultChunkSize,
+			chunkSize: chunk.DefaultSize,
 			hashSize:  int64(len(addr)),
 		},
 		addr: addr,
@@ -156,7 +153,7 @@ func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) (
 	tsp := &TreeSplitterParams{
 		SplitterParams: SplitterParams{
 			ChunkerParams: ChunkerParams{
-				chunkSize: DefaultChunkSize,
+				chunkSize: chunk.DefaultSize,
 				hashSize:  putter.RefSize(),
 			},
 			reader: data,
diff --git a/swarm/storage/hasherstore.go b/swarm/storage/hasherstore.go
index 139c0ee03..bc23077c1 100644
--- a/swarm/storage/hasherstore.go
+++ b/swarm/storage/hasherstore.go
@@ -22,6 +22,7 @@ import (
 	"sync"

 	"github.com/ethereum/go-ethereum/crypto/sha3"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/storage/encryption"
 )
@@ -57,7 +58,7 @@ func NewHasherStore(chunkStore ChunkStore, hashFunc SwarmHasher, toEncrypt bool)
 	refSize := int64(hashSize)
 	if toEncrypt {
 		refSize += encryption.KeyLength
-		chunkEncryption = newChunkEncryption(DefaultChunkSize, refSize)
+		chunkEncryption = newChunkEncryption(chunk.DefaultSize, refSize)
 	}

 	return &hasherStore{
@@ -190,9 +191,9 @@ func (h *hasherStore) decryptChunkData(chunkData ChunkData, encryptionKey encryp
 	// removing extra bytes which were just added for padding
 	length := ChunkData(decryptedSpan).Size()
-	for length > DefaultChunkSize {
-		length = length + (DefaultChunkSize - 1)
-		length = length / DefaultChunkSize
+	for length > chunk.DefaultSize {
+		length = length + (chunk.DefaultSize - 1)
+		length = length / chunk.DefaultSize
 		length *= h.refSize
 	}
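A note on the "untyped const" bullet: the old `DefaultChunkSize` was declared `int64`, so every `int` context (such as comparing against `len`) needed a conversion. Untyped, `chunk.DefaultSize` adapts to both. A small sketch (the `+8` reflects the 8-byte length prefix that precedes chunk payloads, as in the delivery-side check above):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/chunk"
)

func main() {
	// chunk.DefaultSize is an untyped constant, so it can be used in
	// both int64 contexts (the chunker's chunkSize field) and int
	// contexts (len comparisons) without explicit conversions.
	var chunkSize int64 = chunk.DefaultSize
	data := make([]byte, 5000)
	// the 8 extra bytes account for the length (span) prefix that
	// precedes the chunk payload
	tooBig := len(data) > chunk.DefaultSize+8
	fmt.Println(chunkSize, tooBig) // 4096 true
}
```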
diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go
index baf9e8c14..5ee88baa5 100644
--- a/swarm/storage/ldbstore_test.go
+++ b/swarm/storage/ldbstore_test.go
@@ -27,6 +27,7 @@ import (
 	"time"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"

@@ -184,7 +185,7 @@ func testIterator(t *testing.T, mock bool) {
 		t.Fatalf("init dbStore failed: %v", err)
 	}

-	chunks := GenerateRandomChunks(DefaultChunkSize, chunkcount)
+	chunks := GenerateRandomChunks(chunk.DefaultSize, chunkcount)

 	wg := &sync.WaitGroup{}
 	wg.Add(len(chunks))
@@ -294,7 +295,7 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
 	chunks := []*Chunk{}
 	for i := 0; i < n; i++ {
-		c := GenerateRandomChunk(DefaultChunkSize)
+		c := GenerateRandomChunk(chunk.DefaultSize)
 		chunks = append(chunks, c)
 		log.Trace("generate random chunk", "idx", i, "chunk", c)
 	}
@@ -344,7 +345,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) {
 	chunks := []*Chunk{}
 	for i := 0; i < n; i++ {
-		c := GenerateRandomChunk(DefaultChunkSize)
+		c := GenerateRandomChunk(chunk.DefaultSize)
 		chunks = append(chunks, c)
 		log.Trace("generate random chunk", "idx", i, "chunk", c)
 	}
@@ -398,7 +399,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
 	chunks := []*Chunk{}
 	for i := 0; i < n; i++ {
-		c := GenerateRandomChunk(DefaultChunkSize)
+		c := GenerateRandomChunk(chunk.DefaultSize)
 		chunks = append(chunks, c)
 		log.Trace("generate random chunk", "idx", i, "chunk", c)
 	}
@@ -460,7 +461,7 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
 	chunks := []*Chunk{}
 	for i := 0; i < capacity; i++ {
-		c := GenerateRandomChunk(DefaultChunkSize)
+		c := GenerateRandomChunk(chunk.DefaultSize)
 		chunks = append(chunks, c)
 		log.Trace("generate random chunk", "idx", i, "chunk", c)
 	}
diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go
index 096d150ae..9e3474979 100644
--- a/swarm/storage/localstore.go
+++ b/swarm/storage/localstore.go
@@ -98,20 +98,16 @@ func NewTestLocalStoreForAddr(params *LocalStoreParams) (*LocalStore, error) {
 // After the LDBStore.Put, it is ensured that the MemStore
 // contains the chunk with the same data, but nil ReqC channel.
 func (ls *LocalStore) Put(ctx context.Context, chunk *Chunk) {
-	if l := len(chunk.SData); l < 9 {
-		log.Debug("incomplete chunk data", "addr", chunk.Addr, "length", l)
-		chunk.SetErrored(ErrChunkInvalid)
-		chunk.markAsStored()
-		return
-	}
 	valid := true
+	// ls.Validators contains a list of one validator per chunk type.
+	// if one validator succeeds, then the chunk is valid
 	for _, v := range ls.Validators {
 		if valid = v.Validate(chunk.Addr, chunk.SData); valid {
 			break
 		}
 	}
 	if !valid {
-		log.Trace("invalid content address", "addr", chunk.Addr)
+		log.Trace("invalid chunk", "addr", chunk.Addr, "len", len(chunk.SData))
 		chunk.SetErrored(ErrChunkInvalid)
 		chunk.markAsStored()
 		return
diff --git a/swarm/storage/localstore_test.go b/swarm/storage/localstore_test.go
index 2bb81efa3..ae62218fe 100644
--- a/swarm/storage/localstore_test.go
+++ b/swarm/storage/localstore_test.go
@@ -20,6 +20,8 @@ import (
 	"io/ioutil"
 	"os"
 	"testing"
+
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 )

 var (
@@ -61,7 +63,7 @@ func TestValidator(t *testing.T) {
 	// add content address validator and check puts
 	// bad should fail, good should pass
 	store.Validators = append(store.Validators, NewContentAddressValidator(hashfunc))
-	chunks = GenerateRandomChunks(DefaultChunkSize, 2)
+	chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
 	goodChunk = chunks[0]
 	badChunk = chunks[1]
 	copy(badChunk.SData, goodChunk.SData)
@@ -79,7 +81,7 @@ func TestValidator(t *testing.T) {
 	var negV boolTestValidator
 	store.Validators = append(store.Validators, negV)
-	chunks = GenerateRandomChunks(DefaultChunkSize, 2)
+	chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
 	goodChunk = chunks[0]
 	badChunk = chunks[1]
 	copy(badChunk.SData, goodChunk.SData)
@@ -97,7 +99,7 @@ func TestValidator(t *testing.T) {
 	var posV boolTestValidator = true
 	store.Validators = append(store.Validators, posV)
-	chunks = GenerateRandomChunks(DefaultChunkSize, 2)
+	chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
 	goodChunk = chunks[0]
 	badChunk = chunks[1]
 	copy(badChunk.SData, goodChunk.SData)
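The `boolTestValidator` appended to `store.Validators` above is not shown in these hunks; from its usage (`var negV boolTestValidator`, `var posV boolTestValidator = true`) it is presumably a bool whose value is the verdict, along these lines (in package `storage`, where `Address` is defined):

```go
// boolTestValidator (inferred from its usage in the test hunks above,
// not shown in this diff): a fixed-verdict validator. The zero value
// rejects every chunk; a true value accepts every chunk.
type boolTestValidator bool

func (v boolTestValidator) Validate(addr Address, data []byte) bool {
	return bool(v)
}
```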
address", "addr", chunkAddr.Hex()) } return valid } @@ -118,7 +114,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { // First, deserialize the chunk var r SignedResourceUpdate if err := r.fromChunk(chunkAddr, data); err != nil { - log.Debug("Invalid resource chunk with address %s: %s ", chunkAddr.Hex(), err.Error()) + log.Debug("Invalid resource chunk", "addr", chunkAddr.Hex(), "err", err.Error()) return false } @@ -126,7 +122,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { // that was used to retrieve this chunk // if this validation fails, someone forged a chunk. if !bytes.Equal(chunkAddr, r.updateHeader.UpdateAddr()) { - log.Debug("period,version,rootAddr contained in update chunk do not match updateAddr %s", chunkAddr.Hex()) + log.Debug("period,version,rootAddr contained in update chunk do not match updateAddr", "addr", chunkAddr.Hex()) return false } @@ -134,7 +130,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { // If it fails, it means either the signature is not valid, data is corrupted // or someone is trying to update someone else's resource. if err := r.Verify(); err != nil { - log.Debug("Invalid signature: %v", err) + log.Debug("Invalid signature", "err", err) return false } @@ -172,11 +168,6 @@ func (h *Handler) GetVersion(rootAddr storage.Address) (uint32, error) { return rsrc.version, nil } -// \TODO should be hashsize * branches from the chosen chunker, implement with FileStore -func (h *Handler) chunkSize() int64 { - return chunkSize -} - // New creates a new metadata chunk out of the request passed in. func (h *Handler) New(ctx context.Context, request *Request) error { diff --git a/swarm/storage/mru/resource_test.go b/swarm/storage/mru/resource_test.go index 95c9eccdf..76d7c58a1 100644 --- a/swarm/storage/mru/resource_test.go +++ b/swarm/storage/mru/resource_test.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/contracts/ens" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/chunk" "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/storage" ) @@ -776,14 +777,11 @@ func TestValidatorInStore(t *testing.T) { // set up resource handler and add is as a validator to the localstore rhParams := &HandlerParams{} - rh, err := NewHandler(rhParams) - if err != nil { - t.Fatal(err) - } + rh := NewHandler(rhParams) store.Validators = append(store.Validators, rh) // create content addressed chunks, one good, one faulty - chunks := storage.GenerateRandomChunks(storage.DefaultChunkSize, 2) + chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) goodChunk := chunks[0] badChunk := chunks[1] badChunk.SData = goodChunk.SData diff --git a/swarm/storage/mru/testutil.go b/swarm/storage/mru/testutil.go index 751f51af3..6efcba9ab 100644 --- a/swarm/storage/mru/testutil.go +++ b/swarm/storage/mru/testutil.go @@ -38,10 +38,7 @@ func (t *TestHandler) Close() { // NewTestHandler creates Handler object to be used for testing purposes. 
diff --git a/swarm/storage/mru/testutil.go b/swarm/storage/mru/testutil.go
index 751f51af3..6efcba9ab 100644
--- a/swarm/storage/mru/testutil.go
+++ b/swarm/storage/mru/testutil.go
@@ -38,10 +38,7 @@ func (t *TestHandler) Close() {
 // NewTestHandler creates Handler object to be used for testing purposes.
 func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) {
 	path := filepath.Join(datadir, testDbDirName)
-	rh, err := NewHandler(params)
-	if err != nil {
-		return nil, fmt.Errorf("resource handler create fail: %v", err)
-	}
+	rh := NewHandler(params)
 	localstoreparams := storage.NewDefaultLocalStoreParams()
 	localstoreparams.Init(path)
 	localStore, err := storage.NewLocalStore(localstoreparams, nil)
diff --git a/swarm/storage/mru/update.go b/swarm/storage/mru/update.go
index 88c4ac4e5..d1bd37ddf 100644
--- a/swarm/storage/mru/update.go
+++ b/swarm/storage/mru/update.go
@@ -20,6 +20,7 @@ import (
 	"encoding/binary"
 	"errors"

+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/multihash"
 )
@@ -42,7 +43,7 @@ const chunkPrefixLength = 2 + 2
 //
 // Minimum size is Header + 1 (minimum data length, enforced)
 const minimumUpdateDataLength = updateHeaderLength + 1
-const maxUpdateDataLength = chunkSize - signatureLength - updateHeaderLength - chunkPrefixLength
+const maxUpdateDataLength = chunk.DefaultSize - signatureLength - updateHeaderLength - chunkPrefixLength

 // binaryPut serializes the resource update information into the given slice
 func (r *resourceUpdate) binaryPut(serializedData []byte) error {
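The `maxUpdateDataLength` change pins the whole serialized update (prefix, header, payload, signature) inside one default-size chunk. The budget, with hedged placeholders for the constants not visible in this diff:

```go
// Space budget for one resource update chunk. Only chunkPrefixLength
// (2 + 2) is visible in this diff; signatureLength and
// updateHeaderLength are placeholders for the mru package's values.
const (
	defaultSize        = 4096  // chunk.DefaultSize
	chunkPrefixLength  = 2 + 2 // presumably two uint16 length fields
	signatureLength    = 65    // placeholder: typical r||s||v ECDSA signature
	updateHeaderLength = 72    // placeholder

	// 3955 bytes of payload with these placeholder values
	maxUpdateDataLength = defaultSize - signatureLength - updateHeaderLength - chunkPrefixLength
)
```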
diff --git a/swarm/storage/pyramid.go b/swarm/storage/pyramid.go
index 2923c81c5..36ff66d04 100644
--- a/swarm/storage/pyramid.go
+++ b/swarm/storage/pyramid.go
@@ -25,6 +25,7 @@ import (
 	"sync"
 	"time"

+	"github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 )
@@ -101,11 +102,11 @@ func NewPyramidSplitterParams(addr Address, reader io.Reader, putter Putter, get
 	New chunks to store are store using the putter which the caller provides.
 */
 func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) {
-	return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, DefaultChunkSize)).Split(ctx)
+	return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, chunk.DefaultSize)).Split(ctx)
 }

 func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) {
-	return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, DefaultChunkSize)).Append(ctx)
+	return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, chunk.DefaultSize)).Append(ctx)
 }

 // Entry to create a tree node
diff --git a/swarm/storage/types.go b/swarm/storage/types.go
index 3114ef576..53e3af485 100644
--- a/swarm/storage/types.go
+++ b/swarm/storage/types.go
@@ -30,6 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto/sha3"
 	"github.com/ethereum/go-ethereum/swarm/bmt"
+	"github.com/ethereum/go-ethereum/swarm/chunk"
 )

 const MaxPO = 16
@@ -114,7 +115,9 @@ func MakeHashFunc(hash string) SwarmHasher {
 	case "BMT":
 		return func() SwarmHash {
 			hasher := sha3.NewKeccak256
-			pool := bmt.NewTreePool(hasher, bmt.SegmentCount, bmt.PoolSize)
+			hasherSize := hasher().Size()
+			segmentCount := chunk.DefaultSize / hasherSize
+			pool := bmt.NewTreePool(hasher, segmentCount, bmt.PoolSize)
 			return bmt.New(pool)
 		}
 	}
@@ -230,8 +233,8 @@ func GenerateRandomChunk(dataSize int64) *Chunk {
 func GenerateRandomChunks(dataSize int64, count int) (chunks []*Chunk) {
 	var i int
 	hasher := MakeHashFunc(DefaultHash)()
-	if dataSize > DefaultChunkSize {
-		dataSize = DefaultChunkSize
+	if dataSize > chunk.DefaultSize {
+		dataSize = chunk.DefaultSize
 	}

 	for i = 0; i < count; i++ {
@@ -345,6 +348,10 @@ func NewContentAddressValidator(hasher SwarmHasher) *ContentAddressValidator {
 // Validate that the given key is a valid content address for the given data
 func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool {
+	if l := len(data); l < 9 || l > chunk.DefaultSize+8 {
+		return false
+	}
+
 	hasher := v.Hasher()
 	hasher.ResetWithLength(data[:8])
 	hasher.Write(data[8:])
diff --git a/swarm/swarm.go b/swarm/swarm.go
index c380a376f..f731ff33d 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -195,18 +195,13 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 	var resourceHandler *mru.Handler
 	rhparams := &mru.HandlerParams{}

-	resourceHandler, err = mru.NewHandler(rhparams)
-	if err != nil {
-		return nil, err
-	}
+	resourceHandler = mru.NewHandler(rhparams)
 	resourceHandler.SetStore(netStore)

-	var validators []storage.ChunkValidator
-	validators = append(validators, storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)))
-	if resourceHandler != nil {
-		validators = append(validators, resourceHandler)
+	self.lstore.Validators = []storage.ChunkValidator{
+		storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)),
+		resourceHandler,
 	}
-	self.lstore.Validators = validators

 	// setup local store
 	log.Debug(fmt.Sprintf("Set up local storage"))
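Putting the `types.go` hunks together, the content-address validator now reads roughly as below. The final comparison is not part of the visible hunk and is sketched from context (`ResetWithLength` seeds the BMT hasher with the 8-byte span prefix):

```go
// Validate recomputes the chunk's BMT hash and compares it with the
// claimed address. The size check is new in this commit; the tail of
// the function (Sum and compare) is reconstructed from context.
func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool {
	if l := len(data); l < 9 || l > chunk.DefaultSize+8 {
		return false // too short for the 8-byte span prefix plus payload, or oversized
	}
	hasher := v.Hasher()
	hasher.ResetWithLength(data[:8]) // seed the BMT with the span prefix
	hasher.Write(data[8:])           // hash the payload
	return bytes.Equal(hasher.Sum(nil), addr)
}
```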