author    Taylor Gerring <taylor.gerring@gmail.com>  2015-02-16 21:28:33 +0800
committer Taylor Gerring <taylor.gerring@gmail.com>  2015-02-16 21:28:33 +0800
commit    702218008ee2b6d708d6b2821cdef80736bb3224 (patch)
tree      d55ff7ce88187082378e7d8e4c2f3aad14d23b4e /Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util
parent    202362d9258335c695eb75f55f4be74a50a1af33 (diff)
download  go-tangerine-702218008ee2b6d708d6b2821cdef80736bb3224.tar.gz / .tar.zst / .zip
Add versioned dependencies from godep
Diffstat (limited to 'Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util')
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go       293
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go  369
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go         30
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go          48
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go         16
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go          49
6 files changed, 805 insertions(+), 0 deletions(-)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go
new file mode 100644
index 000000000..21de24255
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go
@@ -0,0 +1,293 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package util
+
+// This is a copy of the Go standard library's bytes.Buffer with some
+// modifications and some features stripped.
+
+import (
+ "bytes"
+ "io"
+)
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
+}
+
+// Bytes returns a slice of the contents of the unread portion of the buffer;
+// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
+// returned slice, the contents of the buffer will change provided there
+// are no intervening method calls on the Buffer.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "<nil>".
+func (b *Buffer) String() string {
+ if b == nil {
+ // Special case, useful in debugging.
+ return "<nil>"
+ }
+ return string(b.buf[b.off:])
+}
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Truncate discards all but the first n unread bytes from the buffer.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+ switch {
+ case n < 0 || n > b.Len():
+ panic("leveldb/util.Buffer: truncation out of range")
+ case n == 0:
+ // Reuse buffer space.
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+n]
+}
+
+// Reset resets the buffer so it has no content.
+// b.Reset() is the same as b.Truncate(0).
+func (b *Buffer) Reset() { b.Truncate(0) }
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+ m := b.Len()
+ // If buffer is empty, reset to recover space.
+ if m == 0 && b.off != 0 {
+ b.Truncate(0)
+ }
+ if len(b.buf)+n > cap(b.buf) {
+ var buf []byte
+ if b.buf == nil && n <= len(b.bootstrap) {
+ buf = b.bootstrap[0:]
+ } else if m+n <= cap(b.buf)/2 {
+ // We can slide things down instead of allocating a new
+ // slice. We only need m+n <= cap(b.buf) to slide, but
+ // we instead let capacity get twice as large so we
+ // don't spend all our time copying.
+ copy(b.buf[:], b.buf[b.off:])
+ buf = b.buf[:m]
+ } else {
+ // not enough space anywhere
+ buf = makeSlice(2*cap(b.buf) + n)
+ copy(buf, b.buf[b.off:])
+ }
+ b.buf = buf
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+m+n]
+ return b.off + m
+}
+
+// Alloc allocates an n-byte slice from the buffer, growing the buffer as
+// needed. If n is negative, Alloc will panic.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) Alloc(n int) []byte {
+ if n < 0 {
+ panic("leveldb/util.Buffer.Alloc: negative count")
+ }
+ m := b.grow(n)
+ return b.buf[m:]
+}
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+ if n < 0 {
+ panic("leveldb/util.Buffer.Grow: negative count")
+ }
+ m := b.grow(n)
+ b.buf = b.buf[0:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with bytes.ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+ m := b.grow(len(p))
+ return copy(b.buf[m:], p), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const MinRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+ // If buffer is empty, reset to recover space.
+ if b.off >= len(b.buf) {
+ b.Truncate(0)
+ }
+ for {
+ if free := cap(b.buf) - len(b.buf); free < MinRead {
+ // not enough space at end
+ newBuf := b.buf
+ if b.off+free < MinRead {
+ // not enough space using beginning of buffer;
+ // double buffer capacity
+ newBuf = makeSlice(2*cap(b.buf) + MinRead)
+ }
+ copy(newBuf, b.buf[b.off:])
+ b.buf = newBuf[:len(b.buf)-b.off]
+ b.off = 0
+ }
+ m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
+ b.buf = b.buf[0 : len(b.buf)+m]
+ n += int64(m)
+ if e == io.EOF {
+ break
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+ return n, nil // err is EOF, so return nil explicitly
+}
+
+// makeSlice allocates a slice of size n. If the allocation fails, it panics
+// with bytes.ErrTooLarge.
+func makeSlice(n int) []byte {
+ // If the make fails, give a known error.
+ defer func() {
+ if recover() != nil {
+ panic(bytes.ErrTooLarge)
+ }
+ }()
+ return make([]byte, n)
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+ if b.off < len(b.buf) {
+ nBytes := b.Len()
+ m, e := w.Write(b.buf[b.off:])
+ if m > nBytes {
+ panic("leveldb/util.Buffer.WriteTo: invalid Write count")
+ }
+ b.off += m
+ n = int64(m)
+ if e != nil {
+ return n, e
+ }
+ // all bytes should have been written, by definition of
+ // Write method in io.Writer
+ if m != nBytes {
+ return n, io.ErrShortWrite
+ }
+ }
+ // Buffer is now empty; reset.
+ b.Truncate(0)
+ return
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// bytes.ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+ m := b.grow(1)
+ b.buf[m] = c
+ return nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ if len(p) == 0 {
+ return
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, b.buf[b.off:])
+ b.off += n
+ return
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+ m := b.Len()
+ if n > m {
+ n = m
+ }
+ data := b.buf[b.off : b.off+n]
+ b.off += n
+ return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (c byte, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, io.EOF
+ }
+ c = b.buf[b.off]
+ b.off++
+ return c, nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+ slice, err := b.readSlice(delim)
+ // return a copy of slice. The buffer's backing array may
+ // be overwritten by later calls.
+ line = append(line, slice...)
+ return
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+ i := bytes.IndexByte(b.buf[b.off:], delim)
+ end := b.off + i + 1
+ if i < 0 {
+ end = len(b.buf)
+ err = io.EOF
+ }
+ line = b.buf[b.off:end]
+ b.off = end
+ return line, err
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial
+// contents. It is intended to prepare a Buffer to read existing data. It
+// can also be used to size the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
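As a usage sketch (hypothetical caller code, not part of this commit), the Buffer above behaves like the standard bytes.Buffer with the extra Alloc method for reserving space in place, assuming the vendored package is imported by its upstream path:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	var b util.Buffer // the zero value is an empty buffer ready to use

	b.Write([]byte("hello ")) // append, growing as needed
	copy(b.Alloc(5), "world") // reserve 5 bytes in place and fill them
	fmt.Println(b.String())   // hello world

	head := make([]byte, 5)
	n, _ := b.Read(head) // drain the first 5 unread bytes
	fmt.Printf("%s | %d bytes unread\n", head[:n], b.Len())
}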
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go
new file mode 100644
index 000000000..87d96739c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go
@@ -0,0 +1,369 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "bytes"
+ "io"
+ "math/rand"
+ "runtime"
+ "testing"
+)
+
+const N = 10000 // make this bigger for a larger (and slower) test
+var data string // test data for write tests
+var testBytes []byte // test data; same as data but as a slice.
+
+func init() {
+ testBytes = make([]byte, N)
+ for i := 0; i < N; i++ {
+ testBytes[i] = 'a' + byte(i%26)
+ }
+ data = string(testBytes)
+}
+
+// Verify that contents of buf match the string s.
+func check(t *testing.T, testname string, buf *Buffer, s string) {
+ bytes := buf.Bytes()
+ str := buf.String()
+ if buf.Len() != len(bytes) {
+ t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
+ }
+
+ if buf.Len() != len(str) {
+ t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
+ }
+
+ if buf.Len() != len(s) {
+ t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
+ }
+
+ if string(bytes) != s {
+ t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
+ }
+}
+
+// Fill buf through n writes of byte slice fub.
+// The initial contents of buf correspond to the string s;
+// the result is the final contents of buf returned as a string.
+func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
+ check(t, testname+" (fill 1)", buf, s)
+ for ; n > 0; n-- {
+ m, err := buf.Write(fub)
+ if m != len(fub) {
+ t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
+ }
+ if err != nil {
+ t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
+ }
+ s += string(fub)
+ check(t, testname+" (fill 4)", buf, s)
+ }
+ return s
+}
+
+func TestNewBuffer(t *testing.T) {
+ buf := NewBuffer(testBytes)
+ check(t, "NewBuffer", buf, data)
+}
+
+// Empty buf through repeated reads into fub.
+// The initial contents of buf correspond to the string s.
+func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
+ check(t, testname+" (empty 1)", buf, s)
+
+ for {
+ n, err := buf.Read(fub)
+ if n == 0 {
+ break
+ }
+ if err != nil {
+ t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
+ }
+ s = s[n:]
+ check(t, testname+" (empty 3)", buf, s)
+ }
+
+ check(t, testname+" (empty 4)", buf, "")
+}
+
+func TestBasicOperations(t *testing.T) {
+ var buf Buffer
+
+ for i := 0; i < 5; i++ {
+ check(t, "TestBasicOperations (1)", &buf, "")
+
+ buf.Reset()
+ check(t, "TestBasicOperations (2)", &buf, "")
+
+ buf.Truncate(0)
+ check(t, "TestBasicOperations (3)", &buf, "")
+
+ n, err := buf.Write([]byte(data[0:1]))
+ if n != 1 {
+ t.Errorf("wrote 1 byte, but n == %d", n)
+ }
+ if err != nil {
+ t.Errorf("err should always be nil, but err == %s", err)
+ }
+ check(t, "TestBasicOperations (4)", &buf, "a")
+
+ buf.WriteByte(data[1])
+ check(t, "TestBasicOperations (5)", &buf, "ab")
+
+ n, err = buf.Write([]byte(data[2:26]))
+ if n != 24 {
+ t.Errorf("wrote 25 bytes, but n == %d", n)
+ }
+ check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
+
+ buf.Truncate(26)
+ check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
+
+ buf.Truncate(20)
+ check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
+
+ empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
+ empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
+
+ buf.WriteByte(data[1])
+ c, err := buf.ReadByte()
+ if err != nil {
+ t.Error("ReadByte unexpected eof")
+ }
+ if c != data[1] {
+ t.Errorf("ReadByte wrong value c=%v", c)
+ }
+ c, err = buf.ReadByte()
+ if err == nil {
+ t.Error("ReadByte unexpected not eof")
+ }
+ }
+}
+
+func TestLargeByteWrites(t *testing.T) {
+ var buf Buffer
+ limit := 30
+ if testing.Short() {
+ limit = 9
+ }
+ for i := 3; i < limit; i += 3 {
+ s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
+ empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
+ }
+ check(t, "TestLargeByteWrites (3)", &buf, "")
+}
+
+func TestLargeByteReads(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+ empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
+ }
+ check(t, "TestLargeByteReads (3)", &buf, "")
+}
+
+func TestMixedReadsAndWrites(t *testing.T) {
+ var buf Buffer
+ s := ""
+ for i := 0; i < 50; i++ {
+ wlen := rand.Intn(len(data))
+ s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
+ rlen := rand.Intn(len(data))
+ fub := make([]byte, rlen)
+ n, _ := buf.Read(fub)
+ s = s[n:]
+ }
+ empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
+}
+
+func TestNil(t *testing.T) {
+ var b *Buffer
+ if b.String() != "<nil>" {
+ t.Errorf("expected <nil>; got %q", b.String())
+ }
+}
+
+func TestReadFrom(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+ var b Buffer
+ b.ReadFrom(&buf)
+ empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
+ }
+}
+
+func TestWriteTo(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+ var b Buffer
+ buf.WriteTo(&b)
+ empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
+ }
+}
+
+func TestNext(t *testing.T) {
+ b := []byte{0, 1, 2, 3, 4}
+ tmp := make([]byte, 5)
+ for i := 0; i <= 5; i++ {
+ for j := i; j <= 5; j++ {
+ for k := 0; k <= 6; k++ {
+ // 0 <= i <= j <= 5; 0 <= k <= 6
+ // Check that if we start with a buffer
+ // of length j at offset i and ask for
+ // Next(k), we get the right bytes.
+ buf := NewBuffer(b[0:j])
+ n, _ := buf.Read(tmp[0:i])
+ if n != i {
+ t.Fatalf("Read %d returned %d", i, n)
+ }
+ bb := buf.Next(k)
+ want := k
+ if want > j-i {
+ want = j - i
+ }
+ if len(bb) != want {
+ t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
+ }
+ for l, v := range bb {
+ if v != byte(l+i) {
+ t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
+ }
+ }
+ }
+ }
+ }
+}
+
+var readBytesTests = []struct {
+ buffer string
+ delim byte
+ expected []string
+ err error
+}{
+ {"", 0, []string{""}, io.EOF},
+ {"a\x00", 0, []string{"a\x00"}, nil},
+ {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil},
+ {"hello\x01world", 1, []string{"hello\x01"}, nil},
+ {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF},
+ {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil},
+ {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF},
+}
+
+func TestReadBytes(t *testing.T) {
+ for _, test := range readBytesTests {
+ buf := NewBuffer([]byte(test.buffer))
+ var err error
+ for _, expected := range test.expected {
+ var bytes []byte
+ bytes, err = buf.ReadBytes(test.delim)
+ if string(bytes) != expected {
+ t.Errorf("expected %q, got %q", expected, bytes)
+ }
+ if err != nil {
+ break
+ }
+ }
+ if err != test.err {
+ t.Errorf("expected error %v, got %v", test.err, err)
+ }
+ }
+}
+
+func TestGrow(t *testing.T) {
+ x := []byte{'x'}
+ y := []byte{'y'}
+ tmp := make([]byte, 72)
+ for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
+ xBytes := bytes.Repeat(x, startLen)
+ for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
+ buf := NewBuffer(xBytes)
+ // If we read, this affects buf.off, which is good to test.
+ readBytes, _ := buf.Read(tmp)
+ buf.Grow(growLen)
+ yBytes := bytes.Repeat(y, growLen)
+ // Check no allocation occurs in write, as long as we're single-threaded.
+ var m1, m2 runtime.MemStats
+ runtime.ReadMemStats(&m1)
+ buf.Write(yBytes)
+ runtime.ReadMemStats(&m2)
+ if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs {
+ t.Errorf("allocation occurred during write")
+ }
+ // Check that buffer has correct data.
+ if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
+ t.Errorf("bad initial data at %d %d", startLen, growLen)
+ }
+ if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
+ t.Errorf("bad written data at %d %d", startLen, growLen)
+ }
+ }
+ }
+}
+
+// Was a bug: used to give EOF reading empty slice at EOF.
+func TestReadEmptyAtEOF(t *testing.T) {
+ b := new(Buffer)
+ slice := make([]byte, 0)
+ n, err := b.Read(slice)
+ if err != nil {
+ t.Errorf("read error: %v", err)
+ }
+ if n != 0 {
+ t.Errorf("wrong count; got %d want 0", n)
+ }
+}
+
+// Tests that we occasionally compact. Issue 5154.
+func TestBufferGrowth(t *testing.T) {
+ var b Buffer
+ buf := make([]byte, 1024)
+ b.Write(buf[0:1])
+ var cap0 int
+ for i := 0; i < 5<<10; i++ {
+ b.Write(buf)
+ b.Read(buf)
+ if i == 0 {
+ cap0 = cap(b.buf)
+ }
+ }
+ cap1 := cap(b.buf)
+ // (*Buffer).grow allows for 2x capacity slop before sliding,
+ // so set our error threshold at 3x.
+ if cap1 > cap0*3 {
+ t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
+ }
+}
+
+// From Issue 5154.
+func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
+ buf := make([]byte, 1024)
+ for i := 0; i < b.N; i++ {
+ var b Buffer
+ b.Write(buf[0:1])
+ for i := 0; i < 5<<10; i++ {
+ b.Write(buf)
+ b.Read(buf)
+ }
+ }
+}
+
+// Check that we don't compact too often. From Issue 5154.
+func BenchmarkBufferFullSmallReads(b *testing.B) {
+ buf := make([]byte, 1024)
+ for i := 0; i < b.N; i++ {
+ var b Buffer
+ b.Write(buf)
+ for b.Len()+20 < cap(b.buf) {
+ b.Write(buf[:10])
+ }
+ for i := 0; i < 5<<10; i++ {
+ b.Read(buf[:1])
+ b.Write(buf[:1])
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go
new file mode 100644
index 000000000..631c9d610
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go
@@ -0,0 +1,30 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "hash/crc32"
+)
+
+var table = crc32.MakeTable(crc32.Castagnoli)
+
+// CRC is a CRC-32 checksum computed using Castagnoli's polynomial.
+type CRC uint32
+
+// NewCRC creates a new crc based on the given bytes.
+func NewCRC(b []byte) CRC {
+ return CRC(0).Update(b)
+}
+
+// Update updates the crc with the given bytes.
+func (c CRC) Update(b []byte) CRC {
+ return CRC(crc32.Update(uint32(c), table, b))
+}
+
+// Value returns a masked crc.
+func (c CRC) Value() uint32 {
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
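Value rotates the raw CRC-32C and adds a constant, which is LevelDB's masking convention for checksums that are themselves stored alongside data. A hypothetical caller (not part of this commit) would use the type like this:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// Checksum a record fed in two chunks; Update is chainable because
	// CRC is a plain value type.
	c := util.NewCRC([]byte("record header"))
	c = c.Update([]byte("record body"))

	fmt.Printf("masked crc: 0x%08x\n", c.Value())
}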
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go
new file mode 100644
index 000000000..54903660f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+// Hash returns the hash of the given data.
+func Hash(data []byte, seed uint32) uint32 {
+ // Similar to murmur hash
+ var m uint32 = 0xc6a4a793
+ var r uint32 = 24
+ h := seed ^ (uint32(len(data)) * m)
+
+ buf := bytes.NewBuffer(data)
+ for buf.Len() >= 4 {
+ var w uint32
+ binary.Read(buf, binary.LittleEndian, &w)
+ h += w
+ h *= m
+ h ^= (h >> 16)
+ }
+
+ rest := buf.Bytes()
+ switch len(rest) {
+ default:
+ panic("not reached")
+ case 3:
+ h += uint32(rest[2]) << 16
+ fallthrough
+ case 2:
+ h += uint32(rest[1]) << 8
+ fallthrough
+ case 1:
+ h += uint32(rest[0])
+ h *= m
+ h ^= (h >> r)
+ case 0:
+ }
+
+ return h
+}
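Hash is a Murmur-style mix keyed by a caller-supplied seed, so different seeds act as largely independent hash functions over the same routine. A small illustrative call (hypothetical key and seed values, not part of this commit):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	key := []byte("user:42")

	// Two different seeds yield two largely independent hashes of the
	// same key; the seed values here are arbitrary examples.
	h1 := util.Hash(key, 0xbc9f1d34)
	h2 := util.Hash(key, 0x12345678)
	fmt.Printf("h1=0x%08x h2=0x%08x\n", h1, h2)
}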
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
new file mode 100644
index 000000000..da0583123
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+// Range is a key range.
+type Range struct {
+ // Start of the key range, included in the range.
+ Start []byte
+
+ // Limit of the key range, not included in the range.
+ Limit []byte
+}
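Range therefore describes the half-open key interval [Start, Limit): Start is included, Limit is excluded. A hypothetical construction (not part of this commit) that covers every key with a given prefix:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// All keys k with "user:" <= k < "user;" — i.e. every key that
	// starts with the prefix "user:" (';' is the byte after ':').
	r := &util.Range{
		Start: []byte("user:"),
		Limit: []byte("user;"),
	}
	fmt.Printf("range [%q, %q)\n", r.Start, r.Limit)
}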
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
new file mode 100644
index 000000000..229c7d41f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package util provides utilities used throughout leveldb.
+package util
+
+import (
+ "errors"
+)
+
+var (
+ ErrNotFound = errors.New("leveldb: not found")
+)
+
+// Releaser is the interface that wraps the basic Release method.
+type Releaser interface {
+ // Release releases associated resources. Release should always succeed
+ // and can be called multiple times without causing an error.
+ Release()
+}
+
+// ReleaseSetter is the interface that wraps the basic SetReleaser method.
+type ReleaseSetter interface {
+ // SetReleaser associates the given releaser with the resources. The
+ // releaser will be called once the corresponding resources are released.
+ // Calling SetReleaser with nil will clear the releaser.
+ SetReleaser(releaser Releaser)
+}
+
+// BasicReleaser provides a basic implementation of Releaser and ReleaseSetter.
+type BasicReleaser struct {
+ releaser Releaser
+}
+
+// Release implements Releaser.Release.
+func (r *BasicReleaser) Release() {
+ if r.releaser != nil {
+ r.releaser.Release()
+ r.releaser = nil
+ }
+}
+
+// SetReleaser implements ReleaseSetter.SetReleaser.
+func (r *BasicReleaser) SetReleaser(releaser Releaser) {
+ r.releaser = releaser
+}
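To make the Releaser/ReleaseSetter contract concrete: a type can embed BasicReleaser and attach a cleanup function, and Release then runs that function at most once. A minimal sketch with hypothetical types (not part of this commit):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

// releaserFunc adapts a plain function to the Releaser interface.
type releaserFunc func()

func (f releaserFunc) Release() { f() }

// snapshot is a hypothetical resource that embeds BasicReleaser to get
// idempotent Release and SetReleaser behaviour for free.
type snapshot struct {
	util.BasicReleaser
	data []byte
}

func main() {
	s := &snapshot{data: []byte("frozen view")}
	s.SetReleaser(releaserFunc(func() { fmt.Println("resources freed") }))

	s.Release() // prints "resources freed"
	s.Release() // no-op: BasicReleaser cleared its releaser after the first call
}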