author     Dave McGregor <dave.s.mcgregor@gmail.com>   2019-01-04 06:15:26 +0800
committer  Péter Szilágyi <peterke@gmail.com>           2019-01-04 15:26:07 +0800
commit     33d233d3e18359123993d3f54987441290faf212 (patch)
tree       6c203ca106f29eb3525df9bdd4737b1ecb48b77d /swarm/bmt/bmt_test.go
parent     49975264a8d37aa9af1a2b71015059245c0c2e0b (diff)
vendor, crypto, swarm: switch over to upstream sha3 package
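
The test changes are mechanical: the vendored github.com/ethereum/go-ethereum/crypto/sha3 package is replaced by the upstream golang.org/x/crypto/sha3 package, where the constructor for the original (pre-NIST) Keccak padding used by Ethereum is named NewLegacyKeccak256 rather than NewKeccak256. Note that sha3.New256 in the upstream package implements standardized SHA3-256 and produces different digests, so the legacy constructor is required for compatibility. A minimal sketch of the new call outside this test file, assuming the upstream module is vendored:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        // NewLegacyKeccak256 uses the original Keccak padding, matching the
        // digests previously produced by go-ethereum's sha3.NewKeccak256.
        h := sha3.NewLegacyKeccak256()
        h.Write([]byte("hello"))
        fmt.Printf("%x\n", h.Sum(nil)) // Keccak-256 digest, not SHA3-256
    }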
Diffstat (limited to 'swarm/bmt/bmt_test.go')
-rw-r--r--  swarm/bmt/bmt_test.go  30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/swarm/bmt/bmt_test.go b/swarm/bmt/bmt_test.go
index 683ba4f5b..ab712d08c 100644
--- a/swarm/bmt/bmt_test.go
+++ b/swarm/bmt/bmt_test.go
@@ -26,8 +26,8 @@ import (
"testing"
"time"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/testutil"
+ "golang.org/x/crypto/sha3"
)
// the actual data length generated (could be longer than max datalength of the BMT)
@@ -44,7 +44,7 @@ var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65,
// calculates the Keccak256 SHA3 hash of the data
func sha3hash(data ...[]byte) []byte {
- h := sha3.NewKeccak256()
+ h := sha3.NewLegacyKeccak256()
return doSum(h, nil, data...)
}
@@ -121,7 +121,7 @@ func TestRefHasher(t *testing.T) {
t.Run(fmt.Sprintf("%d_segments_%d_bytes", segmentCount, length), func(t *testing.T) {
data := testutil.RandomBytes(i, length)
expected := x.expected(data)
- actual := NewRefHasher(sha3.NewKeccak256, segmentCount).Hash(data)
+ actual := NewRefHasher(sha3.NewLegacyKeccak256, segmentCount).Hash(data)
if !bytes.Equal(actual, expected) {
t.Fatalf("expected %x, got %x", expected, actual)
}
@@ -133,7 +133,7 @@ func TestRefHasher(t *testing.T) {
// tests if hasher responds with correct hash comparing the reference implementation return value
func TestHasherEmptyData(t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
var data []byte
for _, count := range counts {
t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
@@ -153,7 +153,7 @@ func TestHasherEmptyData(t *testing.T) {
// tests sequential write with entire max size written in one go
func TestSyncHasherCorrectness(t *testing.T) {
data := testutil.RandomBytes(1, BufferSize)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
size := hasher().Size()
var err error
@@ -179,7 +179,7 @@ func TestSyncHasherCorrectness(t *testing.T) {
// tests order-neutral concurrent writes with entire max size written in one go
func TestAsyncCorrectness(t *testing.T) {
data := testutil.RandomBytes(1, BufferSize)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
size := hasher().Size()
whs := []whenHash{first, last, random}
@@ -226,7 +226,7 @@ func TestHasherReuse(t *testing.T) {
// tests if bmt reuse is not corrupting result
func testHasherReuse(poolsize int, t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, poolsize)
defer pool.Drain(0)
bmt := New(pool)
@@ -243,7 +243,7 @@ func testHasherReuse(poolsize int, t *testing.T) {
// Tests if pool can be cleanly reused even in concurrent use by several hasher
func TestBMTConcurrentUse(t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, PoolSize)
defer pool.Drain(0)
cycles := 100
@@ -277,7 +277,7 @@ LOOP:
// Tests BMT Hasher io.Writer interface is working correctly
// even multiple short random write buffers
func TestBMTWriterBuffers(t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
for _, count := range counts {
t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
@@ -410,7 +410,7 @@ func BenchmarkPool(t *testing.B) {
// benchmarks simple sha3 hash on chunks
func benchmarkSHA3(t *testing.B, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
h := hasher()
t.ReportAllocs()
@@ -426,7 +426,7 @@ func benchmarkSHA3(t *testing.B, n int) {
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(t *testing.B, n int) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
hashSize := hasher().Size()
data := testutil.RandomBytes(1, hashSize)
@@ -453,7 +453,7 @@ func benchmarkBMTBaseline(t *testing.B, n int) {
// benchmarks BMT Hasher
func benchmarkBMT(t *testing.B, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, PoolSize)
bmt := New(pool)
@@ -467,7 +467,7 @@ func benchmarkBMT(t *testing.B, n int) {
// benchmarks BMT hasher with asynchronous concurrent segment/section writes
func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, PoolSize)
bmt := New(pool).NewAsyncWriter(double)
idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
@@ -485,7 +485,7 @@ func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
// benchmarks 100 concurrent bmt hashes with pool capacity
func benchmarkPool(t *testing.B, poolsize, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, poolsize)
cycles := 100
@@ -508,7 +508,7 @@ func benchmarkPool(t *testing.B, poolsize, n int) {
// benchmarks the reference hasher
func benchmarkRefHasher(t *testing.B, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
rbmt := NewRefHasher(hasher, 128)
t.ReportAllocs()