author    Anton Evangelatov <anton.evangelatov@gmail.com>  2018-08-14 22:03:56 +0800
committer Balint Gabor <balint.g@gmail.com>  2018-08-14 22:03:56 +0800
commit    97887d98da703a31040bceee13bce9ee77fca673 (patch)
tree      ffc9a8a6fba087b26d33c0ef6c939a19aaaad8c9 /swarm/bmt
parent    8a040de60bd6b740ebe87cd8e1fe6bfdb6635d2f (diff)
swarm/network, swarm/storage: validate chunk size (#17397)
* swarm/network, swarm/storage: validate default chunk size
* swarm/bmt, swarm/network, swarm/storage: update BMT hash initialisation
* swarm/bmt: move segmentCount to tests
* swarm/chunk: change chunk.DefaultSize to be untyped const
* swarm/storage: add size validator
* swarm/storage: add chunk size validation to localstore
* swarm/storage: move validation from localstore to validator
* swarm/storage: global chunk rules in MRU
Diffstat (limited to 'swarm/bmt')
-rw-r--r--  swarm/bmt/bmt.go        5
-rw-r--r--  swarm/bmt/bmt_test.go  19
2 files changed, 14 insertions(+), 10 deletions(-)
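With SegmentCount removed from the bmt package, callers now define the segment count themselves and pass it to NewTreePool. Below is a minimal sketch of that calling pattern, mirroring the NewTreePool/New/Drain calls and the PoolSize constant that appear in the diff; the import paths and the chunkSize/hashSize constants are assumptions for illustration, not part of this patch.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/swarm/bmt"
)

const (
	hashSize     = 32                   // output size of sha3.NewKeccak256 (assumed)
	chunkSize    = 4096                 // default chunk data size (assumed)
	segmentCount = chunkSize / hashSize // 128, formerly the exported bmt.SegmentCount
)

func main() {
	hasher := sha3.NewKeccak256
	// pool and hasher construction as used by the tests in this diff
	pool := bmt.NewTreePool(hasher, segmentCount, bmt.PoolSize)
	defer pool.Drain(0)
	h := bmt.New(pool)
	fmt.Println(h.Size()) // hash output size reported by the Hasher
}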
diff --git a/swarm/bmt/bmt.go b/swarm/bmt/bmt.go
index 97e0e141e..a85d4369e 100644
--- a/swarm/bmt/bmt.go
+++ b/swarm/bmt/bmt.go
@@ -55,9 +55,6 @@ Two implementations are provided:
 */
 const (
-	// SegmentCount is the maximum number of segments of the underlying chunk
-	// Should be equal to max-chunk-data-size / hash-size
-	SegmentCount = 128
 	// PoolSize is the maximum number of bmt trees used by the hashers, i.e,
 	// the maximum number of concurrent BMT hashing operations performed by the same hasher
 	PoolSize = 8
@@ -318,7 +315,7 @@ func (h *Hasher) Sum(b []byte) (s []byte) {
 // with every full segment calls writeSection in a go routine
 func (h *Hasher) Write(b []byte) (int, error) {
 	l := len(b)
-	if l == 0 || l > 4096 {
+	if l == 0 || l > h.pool.Size {
 		return 0, nil
 	}
 	t := h.getTree()
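In Write, the hard-coded 4096-byte cap is replaced by h.pool.Size, so the accepted input length is driven by the pool's configuration instead of a magic number. For the default parameters the bound is unchanged; the arithmetic below is a sketch of that equivalence, assuming pool.Size holds segmentCount times the segment size (the 32-byte segment size comes from the sha3.keccak256 comment in the test diff).

package main

import "fmt"

func main() {
	const (
		segmentSize  = 32  // sha3.keccak256 output size (assumed)
		segmentCount = 128 // maximum segments per chunk, as in the test constant below
	)
	// Assumption: pool.Size is the maximum data length one BMT hash covers,
	// i.e. segmentCount * segmentSize.
	maxDataLength := segmentCount * segmentSize
	fmt.Println(maxDataLength) // 4096 -- matches the limit previously hard-coded in Write
}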
diff --git a/swarm/bmt/bmt_test.go b/swarm/bmt/bmt_test.go
index 891d8cbb2..760aa11d8 100644
--- a/swarm/bmt/bmt_test.go
+++ b/swarm/bmt/bmt_test.go
@@ -34,6 +34,13 @@ import (
 // the actual data length generated (could be longer than max datalength of the BMT)
 const BufferSize = 4128
+const (
+	// segmentCount is the maximum number of segments of the underlying chunk
+	// Should be equal to max-chunk-data-size / hash-size
+	// Currently set to 128 == 4096 (default chunk size) / 32 (sha3.keccak256 size)
+	segmentCount = 128
+)
+
 var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65, 111, 127, 128}
// calculates the Keccak256 SHA3 hash of the data
@@ -224,14 +231,14 @@ func TestHasherReuse(t *testing.T) {
 // tests if bmt reuse is not corrupting result
 func testHasherReuse(poolsize int, t *testing.T) {
 	hasher := sha3.NewKeccak256
-	pool := NewTreePool(hasher, SegmentCount, poolsize)
+	pool := NewTreePool(hasher, segmentCount, poolsize)
 	defer pool.Drain(0)
 	bmt := New(pool)
 	for i := 0; i < 100; i++ {
 		data := newData(BufferSize)
 		n := rand.Intn(bmt.Size())
-		err := testHasherCorrectness(bmt, hasher, data, n, SegmentCount)
+		err := testHasherCorrectness(bmt, hasher, data, n, segmentCount)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -241,7 +248,7 @@ func testHasherReuse(poolsize int, t *testing.T) {
 // Tests if pool can be cleanly reused even in concurrent use by several hasher
 func TestBMTConcurrentUse(t *testing.T) {
 	hasher := sha3.NewKeccak256
-	pool := NewTreePool(hasher, SegmentCount, PoolSize)
+	pool := NewTreePool(hasher, segmentCount, PoolSize)
 	defer pool.Drain(0)
 	cycles := 100
 	errc := make(chan error)
@@ -451,7 +458,7 @@ func benchmarkBMTBaseline(t *testing.B, n int) {
 func benchmarkBMT(t *testing.B, n int) {
 	data := newData(n)
 	hasher := sha3.NewKeccak256
-	pool := NewTreePool(hasher, SegmentCount, PoolSize)
+	pool := NewTreePool(hasher, segmentCount, PoolSize)
 	bmt := New(pool)
 	t.ReportAllocs()
@@ -465,7 +472,7 @@ func benchmarkBMT(t *testing.B, n int) {
 func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
 	data := newData(n)
 	hasher := sha3.NewKeccak256
-	pool := NewTreePool(hasher, SegmentCount, PoolSize)
+	pool := NewTreePool(hasher, segmentCount, PoolSize)
 	bmt := New(pool).NewAsyncWriter(double)
 	idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
 	shuffle(len(idxs), func(i int, j int) {
@@ -483,7 +490,7 @@ func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
 func benchmarkPool(t *testing.B, poolsize, n int) {
 	data := newData(n)
 	hasher := sha3.NewKeccak256
-	pool := NewTreePool(hasher, SegmentCount, poolsize)
+	pool := NewTreePool(hasher, segmentCount, poolsize)
 	cycles := 100
 	t.ReportAllocs()