-rw-r--r--  accounts/abi/abi.go                | 150
-rw-r--r--  accounts/abi/abi_test.go           | 233
-rw-r--r--  accounts/abi/type.go               |  10
-rw-r--r--  cmd/utils/flags.go                 |   2
-rw-r--r--  core/vm/runtime/runtime.go         |  53
-rw-r--r--  core/vm/runtime/runtime_test.go    |  46
-rw-r--r--  core/vm_env.go                     |  36
-rw-r--r--  eth/api.go                         | 111
-rw-r--r--  eth/backend.go                     |   4
-rw-r--r--  eth/downloader/api.go              |   7
-rw-r--r--  eth/downloader/downloader.go       |  15
-rw-r--r--  eth/downloader/downloader_test.go  |  32
-rw-r--r--  eth/downloader/queue.go            |   5
-rw-r--r--  eth/filters/api.go                 |  46
-rw-r--r--  eth/filters/filter.go              |   2
-rw-r--r--  eth/filters/filter_system.go       |  23
-rw-r--r--  eth/filters/filter_system_test.go  |  87
-rw-r--r--  jsre/ethereum_js.go                | 218
-rw-r--r--  miner/api.go                       |  75
19 files changed, 843 insertions(+), 312 deletions(-)
diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go
index b84fd463a..2dc8039f5 100644
--- a/accounts/abi/abi.go
+++ b/accounts/abi/abi.go
@@ -20,11 +20,10 @@ import (
"encoding/json"
"fmt"
"io"
- "math"
+ "reflect"
+ "strings"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/logger"
- "github.com/ethereum/go-ethereum/logger/glog"
)
// Executer is an executer method for performing state executions. It takes one
@@ -101,52 +100,143 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
}
// toGoType parses the input and casts it to the proper type defined by the ABI
-// argument in t.
-func toGoType(t Argument, input []byte) interface{} {
+// argument in T.
+func toGoType(i int, t Argument, output []byte) (interface{}, error) {
+ index := i * 32
+
+ if index+32 > len(output) {
+ return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32)
+ }
+
+ // Parse the given index output and check whether we need to read
+ // a different offset and length based on the type (i.e. string, bytes)
+ var returnOutput []byte
+ switch t.Type.T {
+ case StringTy, BytesTy: // variable arrays are written at the end of the return bytes
+ // parse offset from which we should start reading
+ offset := int(common.BytesToBig(output[index : index+32]).Uint64())
+ if offset+32 > len(output) {
+ return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32)
+ }
+ // parse the size up until we should be reading
+ size := int(common.BytesToBig(output[offset : offset+32]).Uint64())
+ if offset+32+size > len(output) {
+ return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+size)
+ }
+
+ // get the bytes for this return value
+ returnOutput = output[offset+32 : offset+32+size]
+ default:
+ returnOutput = output[index : index+32]
+ }
+
+ // cast bytes to abi return type
switch t.Type.T {
case IntTy:
- return common.BytesToBig(input)
+ return common.BytesToBig(returnOutput), nil
case UintTy:
- return common.BytesToBig(input)
+ return common.BytesToBig(returnOutput), nil
case BoolTy:
- return common.BytesToBig(input).Uint64() > 0
+ return common.BytesToBig(returnOutput).Uint64() > 0, nil
case AddressTy:
- return common.BytesToAddress(input)
+ return common.BytesToAddress(returnOutput), nil
case HashTy:
- return common.BytesToHash(input)
+ return common.BytesToHash(returnOutput), nil
+ case BytesTy, FixedBytesTy:
+ return returnOutput, nil
+ case StringTy:
+ return string(returnOutput), nil
}
- return nil
+ return nil, fmt.Errorf("abi: unknown type %v", t.Type.T)
}
-// Call executes a call and attemps to parse the return values and returns it as
-// an interface. It uses the executer method to perform the actual call since
-// the abi knows nothing of the lower level calling mechanism.
+// Call will unmarshal the output of the call into v. It will return an error if
+// an invalid type is given or if the output is too short to conform to the ABI
+// spec.
//
-// Call supports all abi types and includes multiple return values. When only
-// one item is returned a single interface{} will be returned, if a contract
-// method returns multiple values an []interface{} slice is returned.
-func (abi ABI) Call(executer Executer, name string, args ...interface{}) interface{} {
+// Call supports all of the available types and accepts a struct or an interface
+// slice if the return is a tuple.
+func (abi ABI) Call(executer Executer, v interface{}, name string, args ...interface{}) error {
callData, err := abi.Pack(name, args...)
if err != nil {
- glog.V(logger.Debug).Infoln("pack error:", err)
- return nil
+ return err
}
- output := executer(callData)
+ return abi.unmarshal(v, name, executer(callData))
+}
- method := abi.Methods[name]
- ret := make([]interface{}, int(math.Max(float64(len(method.Outputs)), float64(len(output)/32))))
- for i := 0; i < len(ret); i += 32 {
- index := i / 32
- ret[index] = toGoType(method.Outputs[index], output[i:i+32])
+var interSlice = reflect.TypeOf([]interface{}{})
+
+// unmarshal output in v according to the abi specification
+func (abi ABI) unmarshal(v interface{}, name string, output []byte) error {
+ var method = abi.Methods[name]
+
+ if len(output) == 0 {
+ return fmt.Errorf("abi: unmarshalling empty output")
}
- // return single interface
- if len(ret) == 1 {
- return ret[0]
+ value := reflect.ValueOf(v).Elem()
+ typ := value.Type()
+
+ if len(method.Outputs) > 1 {
+ switch value.Kind() {
+ // struct will match named return values to the struct's field
+ // names
+ case reflect.Struct:
+ for i := 0; i < len(method.Outputs); i++ {
+ marshalledValue, err := toGoType(i, method.Outputs[i], output)
+ if err != nil {
+ return err
+ }
+ reflectValue := reflect.ValueOf(marshalledValue)
+
+ for j := 0; j < typ.NumField(); j++ {
+ field := typ.Field(j)
+ // TODO read tags: `abi:"fieldName"`
+ if field.Name == strings.ToUpper(method.Outputs[i].Name[:1])+method.Outputs[i].Name[1:] {
+ if field.Type.AssignableTo(reflectValue.Type()) {
+ value.Field(j).Set(reflectValue)
+ break
+ } else {
+ return fmt.Errorf("abi: cannot unmarshal %v in to %v", field.Type, reflectValue.Type())
+ }
+ }
+ }
+ }
+ case reflect.Slice:
+ if !value.Type().AssignableTo(interSlice) {
+ return fmt.Errorf("abi: cannot marshal tuple in to slice %T (only []interface{} is supported)", v)
+ }
+
+ // create a new slice and start appending the unmarshalled
+ // values to the new interface slice.
+ z := reflect.MakeSlice(typ, 0, len(method.Outputs))
+ for i := 0; i < len(method.Outputs); i++ {
+ marshalledValue, err := toGoType(i, method.Outputs[i], output)
+ if err != nil {
+ return err
+ }
+ z = reflect.Append(z, reflect.ValueOf(marshalledValue))
+ }
+ value.Set(z)
+ default:
+ return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
+ }
+
+ } else {
+ marshalledValue, err := toGoType(0, method.Outputs[0], output)
+ if err != nil {
+ return err
+ }
+ reflectValue := reflect.ValueOf(marshalledValue)
+ if typ.AssignableTo(reflectValue.Type()) {
+ value.Set(reflectValue)
+ } else {
+ return fmt.Errorf("abi: cannot unmarshal %v in to %v", reflectValue.Type(), value.Type())
+ }
}
- return ret
+ return nil
}
func (abi *ABI) UnmarshalJSON(data []byte) error {
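
For reference, a minimal usage sketch (not part of this patch) of the reworked ABI.Call. The contract definition and the stubbed executer are illustrative assumptions; the func([]byte) []byte executer shape is inferred from the executer(callData) call above, and only the struct/slice tuple unmarshalling mirrors the new code.

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical contract with a tuple return (uint256, string).
	const definition = `[
	  { "name": "multi", "const": true, "outputs": [
	    { "name": "Int", "type": "uint256" },
	    { "name": "String", "type": "string" } ] }]`

	contractABI, err := abi.JSON(strings.NewReader(definition))
	if err != nil {
		panic(err)
	}

	// Stub executer returning uint256(1) followed by the dynamic string
	// "hello" (offset 0x40, length 5, right-padded payload), the same
	// layout exercised by TestMultiReturnWithStruct in abi_test.go below.
	exec := func(input []byte) []byte {
		out := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")
		out = append(out, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")...)
		out = append(out, common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005")...)
		out = append(out, common.RightPadBytes([]byte("hello"), 32)...)
		return out
	}

	// A tuple can be unmarshalled into a struct whose exported field names
	// match the named outputs ...
	var res struct {
		Int    *big.Int
		String string
	}
	if err := contractABI.Call(exec, &res, "multi"); err != nil {
		panic(err)
	}
	fmt.Println(res.Int, res.String) // 1 hello

	// ... or into a plain []interface{} in declaration order.
	var tuple []interface{}
	if err := contractABI.Call(exec, &tuple, "multi"); err != nil {
		panic(err)
	}
	fmt.Println(tuple[0].(*big.Int), tuple[1].(string)) // 1 hello
}
```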
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index 000c118f8..bb0143d21 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -394,6 +394,7 @@ func TestBytes(t *testing.T) {
}
}
+/*
func TestReturn(t *testing.T) {
const definition = `[
{ "type" : "function", "name" : "balance", "const" : true, "inputs" : [], "outputs" : [ { "name": "", "type": "hash" } ] },
@@ -422,6 +423,7 @@ func TestReturn(t *testing.T) {
t.Errorf("expected type common.Address, got %T", r)
}
}
+*/
func TestDefaultFunctionParsing(t *testing.T) {
const definition = `[{ "name" : "balance" }]`
@@ -458,3 +460,234 @@ func TestBareEvents(t *testing.T) {
t.Error("expected 'name' event to be present")
}
}
+
+func TestMultiReturnWithStruct(t *testing.T) {
+ const definition = `[
+ { "name" : "multi", "const" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
+
+ abi, err := JSON(strings.NewReader(definition))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // using buff to make the code readable
+ buff := new(bytes.Buffer)
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
+ stringOut := "hello"
+ buff.Write(common.RightPadBytes([]byte(stringOut), 32))
+
+ var inter struct {
+ Int *big.Int
+ String string
+ }
+ err = abi.unmarshal(&inter, "multi", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ }
+
+ if inter.Int == nil || inter.Int.Cmp(big.NewInt(1)) != 0 {
+ t.Error("expected Int to be 1 got", inter.Int)
+ }
+
+ if inter.String != stringOut {
+ t.Error("expected String to be", stringOut, "got", inter.String)
+ }
+
+ var reversed struct {
+ String string
+ Int *big.Int
+ }
+
+ err = abi.unmarshal(&reversed, "multi", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ }
+
+ if reversed.Int == nil || reversed.Int.Cmp(big.NewInt(1)) != 0 {
+ t.Error("expected Int to be 1 got", reversed.Int)
+ }
+
+ if reversed.String != stringOut {
+ t.Error("expected String to be", stringOut, "got", reversed.String)
+ }
+}
+
+func TestMultiReturnWithSlice(t *testing.T) {
+ const definition = `[
+ { "name" : "multi", "const" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
+
+ abi, err := JSON(strings.NewReader(definition))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // using buff to make the code readable
+ buff := new(bytes.Buffer)
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
+ stringOut := "hello"
+ buff.Write(common.RightPadBytes([]byte(stringOut), 32))
+
+ var inter []interface{}
+ err = abi.unmarshal(&inter, "multi", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(inter) != 2 {
+ t.Fatal("expected 2 results got", len(inter))
+ }
+
+ if num, ok := inter[0].(*big.Int); !ok || num.Cmp(big.NewInt(1)) != 0 {
+ t.Error("expected index 0 to be 1 got", num)
+ }
+
+ if str, ok := inter[1].(string); !ok || str != stringOut {
+ t.Error("expected index 1 to be", stringOut, "got", str)
+ }
+}
+
+func TestUnmarshal(t *testing.T) {
+ const definition = `[
+ { "name" : "int", "const" : false, "outputs": [ { "type": "uint256" } ] },
+ { "name" : "bool", "const" : false, "outputs": [ { "type": "bool" } ] },
+ { "name" : "bytes", "const" : false, "outputs": [ { "type": "bytes" } ] },
+ { "name" : "multi", "const" : false, "outputs": [ { "type": "bytes" }, { "type": "bytes" } ] },
+ { "name" : "mixedBytes", "const" : true, "outputs": [ { "name": "a", "type": "bytes" }, { "name": "b", "type": "bytes32" } ] }]`
+
+ abi, err := JSON(strings.NewReader(definition))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // marshal int
+ var Int *big.Int
+ err = abi.unmarshal(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
+ if err != nil {
+ t.Error(err)
+ }
+
+ if Int == nil || Int.Cmp(big.NewInt(1)) != 0 {
+ t.Error("expected Int to be 1 got", Int)
+ }
+
+ // marshal bool
+ var Bool bool
+ err = abi.unmarshal(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !Bool {
+ t.Error("expected Bool to be true")
+ }
+
+ // marshal dynamic bytes max length 32
+ buff := new(bytes.Buffer)
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ bytesOut := common.RightPadBytes([]byte("hello"), 32)
+ buff.Write(bytesOut)
+
+ var Bytes []byte
+ err = abi.unmarshal(&Bytes, "bytes", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !bytes.Equal(Bytes, bytesOut) {
+ t.Errorf("expected %x got %x", bytesOut, Bytes)
+ }
+
+ // marshall dynamic bytes max length 64
+ buff.Reset()
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
+ bytesOut = common.RightPadBytes([]byte("hello"), 64)
+ buff.Write(bytesOut)
+
+ err = abi.unmarshal(&Bytes, "bytes", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !bytes.Equal(Bytes, bytesOut) {
+ t.Errorf("expected %x got %x", bytesOut, Bytes)
+ }
+
+ // marshall dynamic bytes max length 63
+ buff.Reset()
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000003f"))
+ bytesOut = common.RightPadBytes([]byte("hello"), 63)
+ buff.Write(bytesOut)
+
+ err = abi.unmarshal(&Bytes, "bytes", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !bytes.Equal(Bytes, bytesOut) {
+ t.Errorf("expected %x got %x", bytesOut, Bytes)
+ }
+
+ // marshal dynamic bytes output empty
+ err = abi.unmarshal(&Bytes, "bytes", nil)
+ if err == nil {
+ t.Error("expected error")
+ }
+
+ // marshal dynamic bytes length 5
+ buff.Reset()
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
+ buff.Write(common.RightPadBytes([]byte("hello"), 32))
+
+ err = abi.unmarshal(&Bytes, "bytes", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !bytes.Equal(Bytes, []byte("hello")) {
+ t.Errorf("expected %x got %x", bytesOut, Bytes)
+ }
+
+ // marshal error
+ buff.Reset()
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ err = abi.unmarshal(&Bytes, "bytes", buff.Bytes())
+ if err == nil {
+ t.Error("expected error")
+ }
+
+ err = abi.unmarshal(&Bytes, "multi", make([]byte, 64))
+ if err == nil {
+ t.Error("expected error")
+ }
+
+ // marshal mixed bytes
+ buff.Reset()
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
+ fixed := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")
+ buff.Write(fixed)
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
+ bytesOut = common.RightPadBytes([]byte("hello"), 32)
+ buff.Write(bytesOut)
+
+ var out []interface{}
+ err = abi.unmarshal(&out, "mixedBytes", buff.Bytes())
+ if err != nil {
+ t.Fatal("didn't expect error:", err)
+ }
+
+ if !bytes.Equal(bytesOut, out[0].([]byte)) {
+ t.Errorf("expected %x, got %x", bytesOut, out[0])
+ }
+
+ if !bytes.Equal(fixed, out[1].([]byte)) {
+ t.Errorf("expected %x, got %x", fixed, out[1])
+ }
+}
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index 32f761ef0..6fb2950ba 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -29,8 +29,11 @@ const (
IntTy byte = iota
UintTy
BoolTy
+ StringTy
SliceTy
AddressTy
+ FixedBytesTy
+ BytesTy
HashTy
RealTy
)
@@ -118,6 +121,7 @@ func NewType(t string) (typ Type, err error) {
typ.T = UintTy
case "bool":
typ.Kind = reflect.Bool
+ typ.T = BoolTy
case "real": // TODO
typ.Kind = reflect.Invalid
case "address":
@@ -128,6 +132,7 @@ func NewType(t string) (typ Type, err error) {
case "string":
typ.Kind = reflect.String
typ.Size = -1
+ typ.T = StringTy
if vsize > 0 {
typ.Size = 32
}
@@ -140,6 +145,11 @@ func NewType(t string) (typ Type, err error) {
typ.Kind = reflect.Slice
typ.Type = byte_ts
typ.Size = vsize
+ if vsize == 0 {
+ typ.T = BytesTy
+ } else {
+ typ.T = FixedBytesTy
+ }
default:
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
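
A short sketch (not from this patch) of how the new type tags fall out of NewType; the calls below follow the vsize logic added in this hunk, with BytesTy for a bare "bytes" and FixedBytesTy for a sized variant.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	dyn, _ := abi.NewType("bytes")     // no size suffix -> dynamic
	fixed, _ := abi.NewType("bytes32") // fixed 32-byte value
	str, _ := abi.NewType("string")

	// These tags are what toGoType switches on to decide whether a value is
	// read in place or via an offset/length pair at the end of the output.
	fmt.Println(dyn.T == abi.BytesTy)        // true
	fmt.Println(fixed.T == abi.FixedBytesTy) // true
	fmt.Println(str.T == abi.StringTy)       // true
}
```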
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 2fc1816af..8e89b9fb1 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -337,7 +337,7 @@ var (
// ATM the url is left to the user and deployment to
JSpathFlag = cli.StringFlag{
Name: "jspath",
- Usage: "JavaSript root path for `loadScript` and document root for `admin.httpGet`",
+ Usage: "JavaScript root path for `loadScript` and document root for `admin.httpGet`",
Value: ".",
}
SolcPathFlag = cli.StringFlag{
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index dd3aa1b0b..1fa06e980 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -41,6 +41,7 @@ type Config struct {
DisableJit bool // "disable" so it's enabled by default
Debug bool
+ State *state.StateDB
GetHashFn func(n uint64) common.Hash
}
@@ -94,12 +95,14 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
vm.EnableJit = !cfg.DisableJit
vm.Debug = cfg.Debug
+ if cfg.State == nil {
+ db, _ := ethdb.NewMemDatabase()
+ cfg.State, _ = state.New(common.Hash{}, db)
+ }
var (
- db, _ = ethdb.NewMemDatabase()
- statedb, _ = state.New(common.Hash{}, db)
- vmenv = NewEnv(cfg, statedb)
- sender = statedb.CreateAccount(cfg.Origin)
- receiver = statedb.CreateAccount(common.StringToAddress("contract"))
+ vmenv = NewEnv(cfg, cfg.State)
+ sender = cfg.State.CreateAccount(cfg.Origin)
+ receiver = cfg.State.CreateAccount(common.StringToAddress("contract"))
)
// set the receiver's (the executing contract) code for execution.
receiver.SetCode(code)
@@ -117,5 +120,43 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
if cfg.Debug {
vm.StdErrFormat(vmenv.StructLogs())
}
- return ret, statedb, err
+ return ret, cfg.State, err
+}
+
+// Call executes the code given by the contract's address. It will return the
+// EVM's return value or an error if it failed.
+//
+// Call, unlike Execute, requires a config and also requires the State field to
+// be set.
+func Call(address common.Address, input []byte, cfg *Config) ([]byte, error) {
+ setDefaults(cfg)
+
+ // defer the call to setting back the original values
+ defer func(debug, forceJit, enableJit bool) {
+ vm.Debug = debug
+ vm.ForceJit = forceJit
+ vm.EnableJit = enableJit
+ }(vm.Debug, vm.ForceJit, vm.EnableJit)
+
+ vm.ForceJit = !cfg.DisableJit
+ vm.EnableJit = !cfg.DisableJit
+ vm.Debug = cfg.Debug
+
+ vmenv := NewEnv(cfg, cfg.State)
+
+ sender := cfg.State.GetOrNewStateObject(cfg.Origin)
+ // Call the code with the given configuration.
+ ret, err := vmenv.Call(
+ sender,
+ address,
+ input,
+ cfg.GasLimit,
+ cfg.GasPrice,
+ cfg.Value,
+ )
+
+ if cfg.Debug {
+ vm.StdErrFormat(vmenv.StructLogs())
+ }
+ return ret, err
}
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 773a0163e..e5183052f 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -17,12 +17,15 @@
package runtime
import (
+ "math/big"
"strings"
"testing"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/ethdb"
)
func TestDefaults(t *testing.T) {
@@ -71,6 +74,49 @@ func TestEnvironment(t *testing.T) {
}, nil, nil)
}
+func TestExecute(t *testing.T) {
+ ret, _, err := Execute([]byte{
+ byte(vm.PUSH1), 10,
+ byte(vm.PUSH1), 0,
+ byte(vm.MSTORE),
+ byte(vm.PUSH1), 32,
+ byte(vm.PUSH1), 0,
+ byte(vm.RETURN),
+ }, nil, nil)
+ if err != nil {
+ t.Fatal("didn't expect error", err)
+ }
+
+ num := common.BytesToBig(ret)
+ if num.Cmp(big.NewInt(10)) != 0 {
+ t.Error("Expected 10, got", num)
+ }
+}
+
+func TestCall(t *testing.T) {
+ db, _ := ethdb.NewMemDatabase()
+ state, _ := state.New(common.Hash{}, db)
+ address := common.HexToAddress("0x0a")
+ state.SetCode(address, []byte{
+ byte(vm.PUSH1), 10,
+ byte(vm.PUSH1), 0,
+ byte(vm.MSTORE),
+ byte(vm.PUSH1), 32,
+ byte(vm.PUSH1), 0,
+ byte(vm.RETURN),
+ })
+
+ ret, err := Call(address, nil, &Config{State: state})
+ if err != nil {
+ t.Fatal("didn't expect error", err)
+ }
+
+ num := common.BytesToBig(ret)
+ if num.Cmp(big.NewInt(10)) != 0 {
+ t.Error("Expected 10, got", num)
+ }
+}
+
func TestRestoreDefaults(t *testing.T) {
Execute(nil, nil, &Config{Debug: true})
if vm.ForceJit {
diff --git a/core/vm_env.go b/core/vm_env.go
index c8b50debc..1c787e982 100644
--- a/core/vm_env.go
+++ b/core/vm_env.go
@@ -25,6 +25,21 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
)
+// GetHashFn returns a function with which the VM environment can query block
+// hashes, up to the limit defined by the Yellow Paper, using the given block
+// chain to look up the information.
+func GetHashFn(ref common.Hash, chain *BlockChain) func(n uint64) common.Hash {
+ return func(n uint64) common.Hash {
+ for block := chain.GetBlock(ref); block != nil; block = chain.GetBlock(block.ParentHash()) {
+ if block.NumberU64() == n {
+ return block.Hash()
+ }
+ }
+
+ return common.Hash{}
+ }
+}
+
type VMEnv struct {
state *state.StateDB
header *types.Header
@@ -32,17 +47,20 @@ type VMEnv struct {
depth int
chain *BlockChain
typ vm.Type
+
+ getHashFn func(uint64) common.Hash
// structured logging
logs []vm.StructLog
}
func NewEnv(state *state.StateDB, chain *BlockChain, msg Message, header *types.Header) *VMEnv {
return &VMEnv{
- chain: chain,
- state: state,
- header: header,
- msg: msg,
- typ: vm.StdVmTy,
+ chain: chain,
+ state: state,
+ header: header,
+ msg: msg,
+ typ: vm.StdVmTy,
+ getHashFn: GetHashFn(header.ParentHash, chain),
}
}
@@ -59,13 +77,7 @@ func (self *VMEnv) SetDepth(i int) { self.depth = i }
func (self *VMEnv) VmType() vm.Type { return self.typ }
func (self *VMEnv) SetVmType(t vm.Type) { self.typ = t }
func (self *VMEnv) GetHash(n uint64) common.Hash {
- for block := self.chain.GetBlock(self.header.ParentHash); block != nil; block = self.chain.GetBlock(block.ParentHash()) {
- if block.NumberU64() == n {
- return block.Hash()
- }
- }
-
- return common.Hash{}
+ return self.getHashFn(n)
}
func (self *VMEnv) AddLog(log *vm.Log) {
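
The new GetHashFn is simply a closure over a reference hash that walks parent links until it finds the requested block number. A toy, self-contained sketch of that pattern (the map-backed chain is a stand-in for *core.BlockChain, used only for illustration):

```go
package main

import "fmt"

type block struct {
	number uint64
	hash   string
	parent string
}

// getHashFn mirrors the closure shape of core.GetHashFn: capture a reference
// hash, then resolve BLOCKHASH-style queries by walking ancestors.
func getHashFn(ref string, byHash map[string]*block) func(n uint64) string {
	return func(n uint64) string {
		for b := byHash[ref]; b != nil; b = byHash[b.parent] {
			if b.number == n {
				return b.hash
			}
		}
		return "" // unknown ancestor; the real code returns common.Hash{}
	}
}

func main() {
	chain := map[string]*block{
		"c": {number: 3, hash: "c", parent: "b"},
		"b": {number: 2, hash: "b", parent: "a"},
		"a": {number: 1, hash: "a", parent: ""},
	}
	lookup := getHashFn("c", chain)
	fmt.Println(lookup(2)) // "b"
	fmt.Println(lookup(9)) // "" (not an ancestor of "c")
}
```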
diff --git a/eth/api.go b/eth/api.go
index 617643e10..37b033dc6 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -47,10 +47,7 @@ import (
"gopkg.in/fatih/set.v0"
)
-const (
- defaultGasPrice = uint64(10000000000000)
- defaultGas = uint64(90000)
-)
+const defaultGas = uint64(90000)
// blockByNumber is a commonly used helper function which retrieves and returns
// the block for the given block number, capable of handling two special blocks:
@@ -152,21 +149,79 @@ func (s *PublicEthereumAPI) Hashrate() *rpc.HexNumber {
}
// Syncing returns false in case the node is currently not synching with the network. It can be up to date or has not
-// yet received the latest block headers from its pears. In case it is synchronizing an object with 3 properties is
-// returned:
+// yet received the latest block headers from its peers. In case it is synchronizing:
// - startingBlock: block number this node started to synchronise from
-// - currentBlock: block number this node is currently importing
-// - highestBlock: block number of the highest block header this node has received from peers
+// - currentBlock: block number this node is currently importing
+// - highestBlock: block number of the highest block header this node has received from peers
+// - pulledStates: number of state entries processed until now
+// - knownStates: number of known state entries that still need to be pulled
func (s *PublicEthereumAPI) Syncing() (interface{}, error) {
- origin, current, height := s.e.Downloader().Progress()
- if current < height {
- return map[string]interface{}{
- "startingBlock": rpc.NewHexNumber(origin),
- "currentBlock": rpc.NewHexNumber(current),
- "highestBlock": rpc.NewHexNumber(height),
- }, nil
+ origin, current, height, pulled, known := s.e.Downloader().Progress()
+
+ // Return not syncing if the synchronisation already completed
+ if current >= height {
+ return false, nil
}
- return false, nil
+ // Otherwise gather the block sync stats
+ return map[string]interface{}{
+ "startingBlock": rpc.NewHexNumber(origin),
+ "currentBlock": rpc.NewHexNumber(current),
+ "highestBlock": rpc.NewHexNumber(height),
+ "pulledStates": rpc.NewHexNumber(pulled),
+ "knownStates": rpc.NewHexNumber(known),
+ }, nil
+}
+
+// PublicMinerAPI provides an API to control the miner.
+// It offers only methods that operate on data which poses no security risk when publicly accessible.
+type PublicMinerAPI struct {
+ e *Ethereum
+ agent *miner.RemoteAgent
+}
+
+// NewPublicMinerAPI create a new PublicMinerAPI instance.
+func NewPublicMinerAPI(e *Ethereum) *PublicMinerAPI {
+ agent := miner.NewRemoteAgent()
+ e.Miner().Register(agent)
+
+ return &PublicMinerAPI{e, agent}
+}
+
+// Mining returns an indication if this node is currently mining.
+func (s *PublicMinerAPI) Mining() bool {
+ return s.e.IsMining()
+}
+
+// SubmitWork can be used by an external miner to submit its POW solution. It returns an indication whether the work
+// was accepted. Note that this is not an indication of whether the provided work was valid!
+func (s *PublicMinerAPI) SubmitWork(nonce rpc.HexNumber, solution, digest common.Hash) bool {
+ return s.agent.SubmitWork(nonce.Uint64(), digest, solution)
+}
+
+// GetWork returns a work package for an external miner. The work package consists of 3 strings:
+// result[0], 32 bytes hex encoded current block header pow-hash
+// result[1], 32 bytes hex encoded seed hash used for DAG
+// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
+func (s *PublicMinerAPI) GetWork() ([]string, error) {
+ if !s.e.IsMining() {
+ if err := s.e.StartMining(0, ""); err != nil {
+ return nil, err
+ }
+ }
+ if work, err := s.agent.GetWork(); err == nil {
+ return work[:], nil
+ } else {
+ glog.Infof("%v\n", err)
+ }
+ return nil, fmt.Errorf("mining not ready")
+}
+
+// SubmitHashrate can be used by remote miners to submit their hash rate. This enables the node to report the combined
+// hash rate of all miners which submit work through this node. It accepts the miner's hash rate and an identifier which
+// must be unique between nodes.
+func (s *PublicMinerAPI) SubmitHashrate(hashrate rpc.HexNumber, id common.Hash) bool {
+ s.agent.SubmitHashrate(id, hashrate.Uint64())
+ return true
}
// PrivateMinerAPI provides private RPC methods to control the miner.
@@ -762,6 +817,7 @@ func newRPCTransaction(b *types.Block, txHash common.Hash) (*RPCTransaction, err
type PublicTransactionPoolAPI struct {
eventMux *event.TypeMux
chainDb ethdb.Database
+ gpo *GasPriceOracle
bc *core.BlockChain
miner *miner.Miner
am *accounts.Manager
@@ -770,14 +826,15 @@ type PublicTransactionPoolAPI struct {
}
// NewPublicTransactionPoolAPI creates a new RPC service with methods specific for the transaction pool.
-func NewPublicTransactionPoolAPI(txPool *core.TxPool, m *miner.Miner, chainDb ethdb.Database, eventMux *event.TypeMux, bc *core.BlockChain, am *accounts.Manager) *PublicTransactionPoolAPI {
+func NewPublicTransactionPoolAPI(e *Ethereum) *PublicTransactionPoolAPI {
return &PublicTransactionPoolAPI{
- eventMux: eventMux,
- chainDb: chainDb,
- bc: bc,
- am: am,
- txPool: txPool,
- miner: m,
+ eventMux: e.EventMux(),
+ gpo: NewGasPriceOracle(e),
+ chainDb: e.ChainDb(),
+ bc: e.BlockChain(),
+ am: e.AccountManager(),
+ txPool: e.TxPool(),
+ miner: e.Miner(),
}
}
@@ -970,7 +1027,7 @@ func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash
args.Gas = rpc.NewHexNumber(defaultGas)
}
if args.GasPrice == nil {
- args.GasPrice = rpc.NewHexNumber(defaultGasPrice)
+ args.GasPrice = rpc.NewHexNumber(s.gpo.SuggestPrice())
}
if args.Value == nil {
args.Value = rpc.NewHexNumber(0)
@@ -999,7 +1056,7 @@ func (s *PublicTransactionPoolAPI) SendTransaction(args SendTxArgs) (common.Hash
s.txPool.SetLocal(signedTx)
if err := s.txPool.Add(signedTx); err != nil {
- return common.Hash{}, nil
+ return common.Hash{}, err
}
if contractCreation {
@@ -1111,7 +1168,7 @@ func (tx *Tx) UnmarshalJSON(b []byte) (err error) {
tx.GasLimit = rpc.NewHexNumber(0)
}
if tx.GasPrice == nil {
- tx.GasPrice = rpc.NewHexNumber(defaultGasPrice)
+ tx.GasPrice = rpc.NewHexNumber(int64(50000000000))
}
if contractCreation {
@@ -1154,7 +1211,7 @@ func (s *PublicTransactionPoolAPI) SignTransaction(args *SignTransactionArgs) (*
args.Gas = rpc.NewHexNumber(defaultGas)
}
if args.GasPrice == nil {
- args.GasPrice = rpc.NewHexNumber(defaultGasPrice)
+ args.GasPrice = rpc.NewHexNumber(s.gpo.SuggestPrice())
}
if args.Value == nil {
args.Value = rpc.NewHexNumber(0)
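
A sketch (not in this patch) of what a caller sees from the extended eth_syncing result. The field names come from the map built in Syncing() above; the hex-string encoding of rpc.HexNumber values and the client-side decoding are assumptions for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// syncStatus mirrors the object returned while a sync is in progress.
type syncStatus struct {
	StartingBlock string `json:"startingBlock"`
	CurrentBlock  string `json:"currentBlock"`
	HighestBlock  string `json:"highestBlock"`
	PulledStates  string `json:"pulledStates"`
	KnownStates   string `json:"knownStates"`
}

func main() {
	// eth_syncing returns either the boolean false or a status object, so a
	// caller has to try both decodings.
	raw := json.RawMessage(`{"startingBlock":"0x0","currentBlock":"0x1b4",
		"highestBlock":"0x3e8","pulledStates":"0x64","knownStates":"0xc8"}`)

	var syncing bool
	if err := json.Unmarshal(raw, &syncing); err == nil && !syncing {
		fmt.Println("not syncing")
		return
	}

	var st syncStatus
	if err := json.Unmarshal(raw, &st); err != nil {
		panic(err)
	}
	fmt.Printf("blocks %s/%s, states %s/%s\n",
		st.CurrentBlock, st.HighestBlock, st.PulledStates, st.KnownStates)
}
```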
diff --git a/eth/backend.go b/eth/backend.go
index 352522f61..2f0bc3ee5 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -269,12 +269,12 @@ func (s *Ethereum) APIs() []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
- Service: NewPublicTransactionPoolAPI(s.TxPool(), s.Miner(), s.ChainDb(), s.EventMux(), s.BlockChain(), s.AccountManager()),
+ Service: NewPublicTransactionPoolAPI(s),
Public: true,
}, {
Namespace: "eth",
Version: "1.0",
- Service: miner.NewPublicMinerAPI(s.Miner()),
+ Service: NewPublicMinerAPI(s),
Public: true,
}, {
Namespace: "eth",
diff --git a/eth/downloader/api.go b/eth/downloader/api.go
index cc79e669f..6df911fee 100644
--- a/eth/downloader/api.go
+++ b/eth/downloader/api.go
@@ -36,6 +36,8 @@ type Progress struct {
Origin uint64 `json:"startingBlock"`
Current uint64 `json:"currentBlock"`
Height uint64 `json:"highestBlock"`
+ Pulled uint64 `json:"pulledStates"`
+ Known uint64 `json:"knownStates"`
}
// SyncingResult provides information about the current synchronisation status for this node.
@@ -44,7 +46,7 @@ type SyncingResult struct {
Status Progress `json:"status"`
}
-// Syncing provides information when this nodes starts synchronising with the Ethereumn network and when it's finished.
+// Syncing provides information when this node starts synchronising with the Ethereum network and when it's finished.
func (s *PublicDownloaderAPI) Syncing() (rpc.Subscription, error) {
sub := s.d.mux.Subscribe(StartEvent{}, DoneEvent{}, FailedEvent{})
@@ -52,13 +54,12 @@ func (s *PublicDownloaderAPI) Syncing() (rpc.Subscription, error) {
switch event.(type) {
case StartEvent:
result := &SyncingResult{Syncing: true}
- result.Status.Origin, result.Status.Current, result.Status.Height = s.d.Progress()
+ result.Status.Origin, result.Status.Current, result.Status.Height, result.Status.Pulled, result.Status.Known = s.d.Progress()
return result
case DoneEvent, FailedEvent:
return false
}
return nil
}
-
return rpc.NewSubscriptionWithOutputFormat(sub, output), nil
}
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 163994730..6dad6a2cd 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -59,7 +59,6 @@ var (
maxQueuedHashes = 256 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection)
maxQueuedHeaders = 256 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
- maxQueuedStates = 256 * 1024 // [eth/63] Maximum number of state requests to queue (DOS protection)
maxResultsProcess = 256 // Number of download results to import at once into the chain
fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync
@@ -197,7 +196,15 @@ func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, ha
// Progress retrieves the synchronisation boundaries, specifically the origin
// block where synchronisation started at (may have failed/suspended); the block
// or header sync is currently at; and the latest known block which the sync targets.
-func (d *Downloader) Progress() (uint64, uint64, uint64) {
+//
+// In addition, during the state download phase of fast synchronisation the
+// number of processed states and the total number of known states are also
+// returned. Otherwise these are zero.
+func (d *Downloader) Progress() (uint64, uint64, uint64, uint64, uint64) {
+ // Fetch the pending state count outside of the lock to prevent unforeseen deadlocks
+ pendingStates := uint64(d.queue.PendingNodeData())
+
+ // Lock the current stats and return the progress
d.syncStatsLock.RLock()
defer d.syncStatsLock.RUnlock()
@@ -210,7 +217,7 @@ func (d *Downloader) Progress() (uint64, uint64, uint64) {
case LightSync:
current = d.headHeader().Number.Uint64()
}
- return d.syncStatsChainOrigin, current, d.syncStatsChainHeight
+ return d.syncStatsChainOrigin, current, d.syncStatsChainHeight, d.syncStatsStateDone, d.syncStatsStateDone + pendingStates
}
// Synchronising returns whether the downloader is currently retrieving blocks.
@@ -297,7 +304,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
default:
}
}
- // Reset and ephemeral sync statistics
+ // Reset any ephemeral sync statistics
d.syncStatsLock.Lock()
d.syncStatsStateTotal = 0
d.syncStatsStateDone = 0
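
With Progress() now also reporting processed and known state entries, a consumer can derive separate block and state completion figures. A small illustrative helper (not part of the patch), fed with example numbers:

```go
package main

import "fmt"

// progressSummary formats the five values returned by Downloader.Progress():
// origin, current and height describe blocks/headers; pulled and known
// describe fast-sync state entries (both zero outside the state phase).
func progressSummary(origin, current, height, pulled, known uint64) string {
	blockPct := 100.0
	if height > origin {
		blockPct = float64(current-origin) / float64(height-origin) * 100
	}
	statePct := 0.0
	if known > 0 {
		statePct = float64(pulled) / float64(known) * 100
	}
	return fmt.Sprintf("blocks %.1f%% (%d/%d), states %.1f%% (%d/%d)",
		blockPct, current, height, statePct, pulled, known)
}

func main() {
	fmt.Println(progressSummary(0, 436, 1000, 100, 200))
	// blocks 43.6% (436/1000), states 50.0% (100/200)
}
```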
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 418243b20..993190c38 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -1301,7 +1301,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
<-progress
}
// Retrieve the sync progress and ensure they are zero (pristine sync)
- if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 {
t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0)
}
// Synchronise half the blocks and check initial progress
@@ -1316,7 +1316,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
}
}()
<-starting
- if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks/2+1) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks/2+1) {
t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, targetBlocks/2+1)
}
progress <- struct{}{}
@@ -1333,14 +1333,14 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
}
}()
<-starting
- if origin, current, latest := tester.downloader.Progress(); origin != uint64(targetBlocks/2+1) || current != uint64(targetBlocks/2+1) || latest != uint64(targetBlocks) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != uint64(targetBlocks/2+1) || current != uint64(targetBlocks/2+1) || latest != uint64(targetBlocks) {
t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, targetBlocks/2+1, targetBlocks/2+1, targetBlocks)
}
progress <- struct{}{}
pending.Wait()
// Check final progress after successful sync
- if origin, current, latest := tester.downloader.Progress(); origin != uint64(targetBlocks/2+1) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != uint64(targetBlocks/2+1) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) {
t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, targetBlocks/2+1, targetBlocks, targetBlocks)
}
}
@@ -1373,7 +1373,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
<-progress
}
// Retrieve the sync progress and ensure they are zero (pristine sync)
- if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 {
t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0)
}
// Synchronise with one of the forks and check progress
@@ -1388,7 +1388,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
}
}()
<-starting
- if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(len(hashesA)-1) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(len(hashesA)-1) {
t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, len(hashesA)-1)
}
progress <- struct{}{}
@@ -1408,14 +1408,14 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
}
}()
<-starting
- if origin, current, latest := tester.downloader.Progress(); origin != uint64(common) || current != uint64(len(hashesA)-1) || latest != uint64(len(hashesB)-1) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != uint64(common) || current != uint64(len(hashesA)-1) || latest != uint64(len(hashesB)-1) {
t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, common, len(hashesA)-1, len(hashesB)-1)
}
progress <- struct{}{}
pending.Wait()
// Check final progress after successful sync
- if origin, current, latest := tester.downloader.Progress(); origin != uint64(common) || current != uint64(len(hashesB)-1) || latest != uint64(len(hashesB)-1) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != uint64(common) || current != uint64(len(hashesB)-1) || latest != uint64(len(hashesB)-1) {
t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, common, len(hashesB)-1, len(hashesB)-1)
}
}
@@ -1448,7 +1448,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
<-progress
}
// Retrieve the sync progress and ensure they are zero (pristine sync)
- if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 {
t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0)
}
// Attempt a full sync with a faulty peer
@@ -1468,7 +1468,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
}
}()
<-starting
- if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks) {
t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, targetBlocks)
}
progress <- struct{}{}
@@ -1485,14 +1485,14 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
}
}()
<-starting
- if origin, current, latest := tester.downloader.Progress(); origin != 0 || current > uint64(targetBlocks/2) || latest != uint64(targetBlocks) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current > uint64(targetBlocks/2) || latest != uint64(targetBlocks) {
t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", origin, current, latest, 0, targetBlocks/2, targetBlocks)
}
progress <- struct{}{}
pending.Wait()
// Check final progress after successful sync
- if origin, current, latest := tester.downloader.Progress(); origin > uint64(targetBlocks/2) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin > uint64(targetBlocks/2) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) {
t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", origin, current, latest, targetBlocks/2, targetBlocks, targetBlocks)
}
}
@@ -1524,7 +1524,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
<-progress
}
// Retrieve the sync progress and ensure they are zero (pristine sync)
- if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 {
t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0)
}
// Create and sync with an attacker that promises a higher chain than available
@@ -1545,7 +1545,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
}
}()
<-starting
- if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks+3) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks+3) {
t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, targetBlocks+3)
}
progress <- struct{}{}
@@ -1562,14 +1562,14 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
}
}()
<-starting
- if origin, current, latest := tester.downloader.Progress(); origin != 0 || current > uint64(targetBlocks) || latest != uint64(targetBlocks) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin != 0 || current > uint64(targetBlocks) || latest != uint64(targetBlocks) {
t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", origin, current, latest, 0, targetBlocks, targetBlocks)
}
progress <- struct{}{}
pending.Wait()
// Check final progress after successful sync
- if origin, current, latest := tester.downloader.Progress(); origin > uint64(targetBlocks) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) {
+ if origin, current, latest, _, _ := tester.downloader.Progress(); origin > uint64(targetBlocks) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) {
t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", origin, current, latest, targetBlocks, targetBlocks, targetBlocks)
}
}
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 1e55560db..9d0f2914d 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -39,7 +39,8 @@ import (
)
var (
- blockCacheLimit = 1024 // Maximum number of blocks to cache before throttling the download
+ blockCacheLimit = 1024 // Maximum number of blocks to cache before throttling the download
+ maxInFlightStates = 4096 // Maximum number of state downloads to allow concurrently
)
var (
@@ -464,7 +465,7 @@ func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest {
q.lock.Lock()
defer q.lock.Unlock()
- return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, count)
+ return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, maxInFlightStates)
}
// reserveHashes reserves a set of hashes for the given peer, skipping previously
diff --git a/eth/filters/api.go b/eth/filters/api.go
index aa4c305a6..148daa649 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -206,12 +206,12 @@ func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []commo
filter.SetEndBlock(latest)
filter.SetAddresses(addresses)
filter.SetTopics(topics)
- filter.LogsCallback = func(logs vm.Logs) {
+ filter.LogCallback = func(log *vm.Log, removed bool) {
s.logMu.Lock()
defer s.logMu.Unlock()
if queue := s.logQueue[id]; queue != nil {
- queue.add(logs...)
+ queue.add(vmlog{log, removed})
}
}
@@ -365,14 +365,14 @@ func (s *PublicFilterAPI) NewFilter(args NewFilterArgs) (string, error) {
}
// GetLogs returns the logs matching the given argument.
-func (s *PublicFilterAPI) GetLogs(args NewFilterArgs) vm.Logs {
+func (s *PublicFilterAPI) GetLogs(args NewFilterArgs) []vmlog {
filter := New(s.chainDb)
filter.SetBeginBlock(args.FromBlock.Int64())
filter.SetEndBlock(args.ToBlock.Int64())
filter.SetAddresses(args.Addresses)
filter.SetTopics(args.Topics)
- return returnLogs(filter.Find())
+ return toRPCLogs(filter.Find(), false)
}
// UninstallFilter removes the filter with the given filter id.
@@ -447,7 +447,7 @@ func (s *PublicFilterAPI) transactionFilterChanged(id int) []common.Hash {
}
// logFilterChanged returns a collection of logs for the log filter with the given id.
-func (s *PublicFilterAPI) logFilterChanged(id int) vm.Logs {
+func (s *PublicFilterAPI) logFilterChanged(id int) []vmlog {
s.logMu.Lock()
defer s.logMu.Unlock()
@@ -458,17 +458,17 @@ func (s *PublicFilterAPI) logFilterChanged(id int) vm.Logs {
}
// GetFilterLogs returns the logs for the filter with the given id.
-func (s *PublicFilterAPI) GetFilterLogs(filterId string) vm.Logs {
+func (s *PublicFilterAPI) GetFilterLogs(filterId string) []vmlog {
id, ok := s.filterMapping[filterId]
if !ok {
- return returnLogs(nil)
+ return toRPCLogs(nil, false)
}
if filter := s.filterManager.Get(id); filter != nil {
- return returnLogs(filter.Find())
+ return toRPCLogs(filter.Find(), false)
}
- return returnLogs(nil)
+ return toRPCLogs(nil, false)
}
// GetFilterChanges returns the logs for the filter with the given id since last time is was called.
@@ -488,28 +488,33 @@ func (s *PublicFilterAPI) GetFilterChanges(filterId string) interface{} {
case transactionFilterTy:
return returnHashes(s.transactionFilterChanged(id))
case logFilterTy:
- return returnLogs(s.logFilterChanged(id))
+ return s.logFilterChanged(id)
}
return []interface{}{}
}
+type vmlog struct {
+ *vm.Log
+ Removed bool `json:"removed"`
+}
+
type logQueue struct {
mu sync.Mutex
- logs vm.Logs
+ logs []vmlog
timeout time.Time
id int
}
-func (l *logQueue) add(logs ...*vm.Log) {
+func (l *logQueue) add(logs ...vmlog) {
l.mu.Lock()
defer l.mu.Unlock()
l.logs = append(l.logs, logs...)
}
-func (l *logQueue) get() vm.Logs {
+func (l *logQueue) get() []vmlog {
l.mu.Lock()
defer l.mu.Unlock()
@@ -556,13 +561,16 @@ func newFilterId() (string, error) {
return "0x" + hex.EncodeToString(subid[:]), nil
}
-// returnLogs is a helper that will return an empty logs array case the given logs is nil, otherwise is will return the
-// given logs. The RPC interfaces defines that always an array is returned.
-func returnLogs(logs vm.Logs) vm.Logs {
- if logs == nil {
- return vm.Logs{}
+// toRPCLogs is a helper that converts a vm.Logs array into a structure which
+// can hold additional information about the logs, such as whether a log was
+// removed. When nil is given it creates an empty slice instead, as required
+// by the RPC specification.
+func toRPCLogs(logs vm.Logs, removed bool) []vmlog {
+ convertedLogs := make([]vmlog, len(logs))
+ for i, log := range logs {
+ convertedLogs[i] = vmlog{Log: log, Removed: removed}
}
- return logs
+ return convertedLogs
}
// returnHashes is a helper that will return an empty hash array case the given hash array is nil, otherwise is will
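
A minimal sketch of what toRPCLogs produces: each vm.Log is wrapped so the RPC output gains a "removed" flag for logs reverted by a chain reorganisation, and a nil input still yields an empty (non-nil) slice. Since vmlog and toRPCLogs are unexported, the snippet is written as if it lived in the filters package (e.g. as a test):

```go
// Assumes the imports already available in the package (fmt and core/vm).
func ExampleToRPCLogs() {
	logs := vm.Logs{&vm.Log{}}

	kept := toRPCLogs(logs, false) // live logs: marshalled with "removed": false
	gone := toRPCLogs(logs, true)  // reorged-out logs: "removed": true
	none := toRPCLogs(nil, false)  // nil in, empty array out, per the RPC spec

	fmt.Println(len(kept), kept[0].Removed, gone[0].Removed, len(none))
	// Output: 1 false true 0
}
```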
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index ff192cdf6..2c92d20b1 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -39,7 +39,7 @@ type Filter struct {
BlockCallback func(*types.Block, vm.Logs)
TransactionCallback func(*types.Transaction)
- LogsCallback func(vm.Logs)
+ LogCallback func(*vm.Log, bool)
}
// Create a new filter which uses a bloom filter on blocks to figure out whether a particular block
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index df3ce90c6..04e58a08c 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -46,6 +46,7 @@ func NewFilterSystem(mux *event.TypeMux) *FilterSystem {
}
fs.sub = mux.Subscribe(
//core.PendingBlockEvent{},
+ core.RemovedLogEvent{},
core.ChainEvent{},
core.TxPreEvent{},
vm.Logs(nil),
@@ -96,7 +97,7 @@ func (fs *FilterSystem) filterLoop() {
case core.ChainEvent:
fs.filterMu.RLock()
for id, filter := range fs.filters {
- if filter.BlockCallback != nil && fs.created[id].Before(event.Time) {
+ if filter.BlockCallback != nil && !fs.created[id].After(event.Time) {
filter.BlockCallback(ev.Block, ev.Logs)
}
}
@@ -105,7 +106,7 @@ func (fs *FilterSystem) filterLoop() {
case core.TxPreEvent:
fs.filterMu.RLock()
for id, filter := range fs.filters {
- if filter.TransactionCallback != nil && fs.created[id].Before(event.Time) {
+ if filter.TransactionCallback != nil && !fs.created[id].After(event.Time) {
filter.TransactionCallback(ev.Tx)
}
}
@@ -114,10 +115,20 @@ func (fs *FilterSystem) filterLoop() {
case vm.Logs:
fs.filterMu.RLock()
for id, filter := range fs.filters {
- if filter.LogsCallback != nil && fs.created[id].Before(event.Time) {
- msgs := filter.FilterLogs(ev)
- if len(msgs) > 0 {
- filter.LogsCallback(msgs)
+ if filter.LogCallback != nil && !fs.created[id].After(event.Time) {
+ for _, log := range filter.FilterLogs(ev) {
+ filter.LogCallback(log, false)
+ }
+ }
+ }
+ fs.filterMu.RUnlock()
+
+ case core.RemovedLogEvent:
+ fs.filterMu.RLock()
+ for id, filter := range fs.filters {
+ if filter.LogCallback != nil && !fs.created[id].After(event.Time) {
+ for _, removedLog := range ev.Logs {
+ filter.LogCallback(removedLog, true)
}
}
}
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
new file mode 100644
index 000000000..7ddeb02bc
--- /dev/null
+++ b/eth/filters/filter_system_test.go
@@ -0,0 +1,87 @@
+package filters
+
+import (
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/event"
+)
+
+func TestCallbacks(t *testing.T) {
+ var (
+ mux event.TypeMux
+ fs = NewFilterSystem(&mux)
+ blockDone = make(chan struct{})
+ txDone = make(chan struct{})
+ logDone = make(chan struct{})
+ removedLogDone = make(chan struct{})
+ )
+
+ blockFilter := &Filter{
+ BlockCallback: func(*types.Block, vm.Logs) {
+ close(blockDone)
+ },
+ }
+ txFilter := &Filter{
+ TransactionCallback: func(*types.Transaction) {
+ close(txDone)
+ },
+ }
+ logFilter := &Filter{
+ LogCallback: func(l *vm.Log, oob bool) {
+ if !oob {
+ close(logDone)
+ }
+ },
+ }
+
+ removedLogFilter := &Filter{
+ LogCallback: func(l *vm.Log, oob bool) {
+ if oob {
+ close(removedLogDone)
+ }
+ },
+ }
+
+ fs.Add(blockFilter)
+ fs.Add(txFilter)
+ fs.Add(logFilter)
+ fs.Add(removedLogFilter)
+
+ mux.Post(core.ChainEvent{})
+ mux.Post(core.TxPreEvent{})
+ mux.Post(core.RemovedLogEvent{vm.Logs{&vm.Log{}}})
+ mux.Post(vm.Logs{&vm.Log{}})
+
+ const dura = 5 * time.Second
+ failTimer := time.NewTimer(dura)
+ select {
+ case <-blockDone:
+ case <-failTimer.C:
+ t.Error("block filter failed to trigger (timeout)")
+ }
+
+ failTimer.Reset(dura)
+ select {
+ case <-txDone:
+ case <-failTimer.C:
+ t.Error("transaction filter failed to trigger (timeout)")
+ }
+
+ failTimer.Reset(dura)
+ select {
+ case <-logDone:
+ case <-failTimer.C:
+ t.Error("log filter failed to trigger (timeout)")
+ }
+
+ failTimer.Reset(dura)
+ select {
+ case <-removedLogDone:
+ case <-failTimer.C:
+ t.Error("removed log filter failed to trigger (timeout)")
+ }
+}
diff --git a/jsre/ethereum_js.go b/jsre/ethereum_js.go
index 94e4fde82..6d5675036 100644
--- a/jsre/ethereum_js.go
+++ b/jsre/ethereum_js.go
@@ -653,7 +653,7 @@ module.exports = SolidityTypeBytes;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file coder.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2015
@@ -683,7 +683,7 @@ var SolidityCoder = function (types) {
*
* @method _requireType
* @param {String} type
- * @returns {SolidityType}
+ * @returns {SolidityType}
* @throws {Error} throws if no matching type is found
*/
SolidityCoder.prototype._requireType = function (type) {
@@ -731,7 +731,7 @@ SolidityCoder.prototype.encodeParams = function (types, params) {
return acc + roundedStaticPartLength;
}, 0);
- var result = this.encodeMultiWithOffset(types, solidityTypes, encodeds, dynamicOffset);
+ var result = this.encodeMultiWithOffset(types, solidityTypes, encodeds, dynamicOffset);
return result;
};
@@ -756,7 +756,7 @@ SolidityCoder.prototype.encodeMultiWithOffset = function (types, solidityTypes,
// TODO: figure out nested arrays
});
-
+
types.forEach(function (type, i) {
if (isDynamic(i)) {
var e = self.encodeWithOffset(types[i], solidityTypes[i], encodeds[i], dynamicOffset);
@@ -776,7 +776,7 @@ SolidityCoder.prototype.encodeWithOffset = function (type, solidityType, encoded
var nestedName = solidityType.nestedName(type);
var nestedStaticPartLength = solidityType.staticPartLength(nestedName);
var result = encoded[0];
-
+
(function () {
var previousLength = 2; // in int
if (solidityType.isDynamicArray(nestedName)) {
@@ -786,7 +786,7 @@ SolidityCoder.prototype.encodeWithOffset = function (type, solidityType, encoded
}
}
})();
-
+
// first element is length, skip it
(function () {
for (var i = 0; i < encoded.length - 1; i++) {
@@ -797,7 +797,7 @@ SolidityCoder.prototype.encodeWithOffset = function (type, solidityType, encoded
return result;
})();
-
+
} else if (solidityType.isStaticArray(type)) {
return (function () {
var nestedName = solidityType.nestedName(type);
@@ -810,7 +810,7 @@ SolidityCoder.prototype.encodeWithOffset = function (type, solidityType, encoded
var previousLength = 0; // in int
for (var i = 0; i < encoded.length; i++) {
// calculate length of previous item
- previousLength += +(encoded[i - 1] || [])[0] || 0;
+ previousLength += +(encoded[i - 1] || [])[0] || 0;
result += f.formatInputInt(offset + i * nestedStaticPartLength + previousLength * 32).encode();
}
})();
@@ -853,7 +853,7 @@ SolidityCoder.prototype.decodeParam = function (type, bytes) {
SolidityCoder.prototype.decodeParams = function (types, bytes) {
var solidityTypes = this.getSolidityTypes(types);
var offsets = this.getOffsets(types, solidityTypes);
-
+
return solidityTypes.map(function (solidityType, index) {
return solidityType.decode(bytes, offsets[index], types[index], index);
});
@@ -863,16 +863,16 @@ SolidityCoder.prototype.getOffsets = function (types, solidityTypes) {
var lengths = solidityTypes.map(function (solidityType, index) {
return solidityType.staticPartLength(types[index]);
});
-
+
for (var i = 1; i < lengths.length; i++) {
// sum with length of previous element
- lengths[i] += lengths[i - 1];
+ lengths[i] += lengths[i - 1];
}
return lengths.map(function (length, index) {
// remove the current length, so the length is sum of previous elements
var staticPartLength = solidityTypes[index].staticPartLength(types[index]);
- return length - staticPartLength;
+ return length - staticPartLength;
});
};
@@ -942,7 +942,7 @@ module.exports = SolidityTypeDynamicBytes;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file formatters.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2015
@@ -1086,7 +1086,7 @@ var formatOutputUInt = function (param) {
* @returns {BigNumber} input bytes formatted to real
*/
var formatOutputReal = function (param) {
- return formatOutputInt(param).dividedBy(new BigNumber(2).pow(128));
+ return formatOutputInt(param).dividedBy(new BigNumber(2).pow(128));
};
/**
@@ -1097,7 +1097,7 @@ var formatOutputReal = function (param) {
* @returns {BigNumber} input bytes formatted to ureal
*/
var formatOutputUReal = function (param) {
- return formatOutputUInt(param).dividedBy(new BigNumber(2).pow(128));
+ return formatOutputUInt(param).dividedBy(new BigNumber(2).pow(128));
};
/**
@@ -1232,7 +1232,7 @@ module.exports = SolidityTypeInt;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file param.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2015
@@ -1251,7 +1251,7 @@ var SolidityParam = function (value, offset) {
/**
* This method should be used to get length of params's dynamic part
- *
+ *
* @method dynamicPartLength
* @returns {Number} length of dynamic part (in bytes)
*/
@@ -1279,7 +1279,7 @@ SolidityParam.prototype.withOffset = function (offset) {
* @param {SolidityParam} result of combination
*/
SolidityParam.prototype.combine = function (param) {
- return new SolidityParam(this.value + param.value);
+ return new SolidityParam(this.value + param.value);
};
/**
@@ -1311,8 +1311,8 @@ SolidityParam.prototype.offsetAsBytes = function () {
*/
SolidityParam.prototype.staticPart = function () {
if (!this.isDynamic()) {
- return this.value;
- }
+ return this.value;
+ }
return this.offsetAsBytes();
};
@@ -1344,7 +1344,7 @@ SolidityParam.prototype.encode = function () {
* @returns {String}
*/
SolidityParam.encodeList = function (params) {
-
+
// updating offsets
var totalOffset = params.length * 32;
var offsetParams = params.map(function (param) {
@@ -1470,13 +1470,13 @@ SolidityType.prototype.staticPartLength = function (name) {
/**
* Should be used to determine if type is dynamic array
- * eg:
+ * eg:
* "type[]" => true
* "type[4]" => false
*
* @method isDynamicArray
* @param {String} name
- * @return {Bool} true if the type is dynamic array
+ * @return {Bool} true if the type is dynamic array
*/
SolidityType.prototype.isDynamicArray = function (name) {
var nestedTypes = this.nestedTypes(name);
@@ -1485,13 +1485,13 @@ SolidityType.prototype.isDynamicArray = function (name) {
/**
* Should be used to determine if type is static array
- * eg:
+ * eg:
* "type[]" => false
* "type[4]" => true
*
* @method isStaticArray
* @param {String} name
- * @return {Bool} true if the type is static array
+ * @return {Bool} true if the type is static array
*/
SolidityType.prototype.isStaticArray = function (name) {
var nestedTypes = this.nestedTypes(name);
@@ -1500,7 +1500,7 @@ SolidityType.prototype.isStaticArray = function (name) {
/**
* Should return length of static array
- * eg.
+ * eg.
* "int[32]" => 32
* "int256[14]" => 14
* "int[2][3]" => 3
@@ -1575,7 +1575,7 @@ SolidityType.prototype.nestedTypes = function (name) {
* Should be used to encode the value
*
* @method encode
- * @param {Object} value
+ * @param {Object} value
* @param {String} name
* @return {String} encoded value
*/
@@ -1589,7 +1589,7 @@ SolidityType.prototype.encode = function (value, name) {
var result = [];
result.push(f.formatInputInt(length).encode());
-
+
value.forEach(function (v) {
result.push(self.encode(v, nestedName));
});
@@ -1665,12 +1665,12 @@ SolidityType.prototype.decode = function (bytes, offset, name) {
return result;
})();
} else if (this.isDynamicType(name)) {
-
+
return (function () {
var dynamicOffset = parseInt('0x' + bytes.substr(offset * 2, 64)); // in bytes
var length = parseInt('0x' + bytes.substr(dynamicOffset * 2, 64)); // in bytes
var roundedLength = Math.floor((length + 31) / 32); // in int
-
+
return self._outputFormatter(new SolidityParam(bytes.substr(dynamicOffset * 2, ( 1 + roundedLength) * 64), 0));
})();
}
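A standalone sketch of the head/tail lookup above, run against a hand-built return blob holding one dynamic value (hypothetical data, the string "abc"):

function dynamicSlice(bytes, offset) {
    var dynamicOffset = parseInt('0x' + bytes.substr(offset * 2, 64));   // where the tail starts, in bytes
    var length = parseInt('0x' + bytes.substr(dynamicOffset * 2, 64));   // payload length, in bytes
    var roundedLength = Math.floor((length + 31) / 32);                  // payload words, rounded up
    return bytes.substr(dynamicOffset * 2, (1 + roundedLength) * 64);    // length word + padded payload
}
var blob =
    '0000000000000000000000000000000000000000000000000000000000000020' + // head: tail offset 0x20
    '0000000000000000000000000000000000000000000000000000000000000003' + // tail: length 3
    '6162630000000000000000000000000000000000000000000000000000000000';  // "abc", right-padded
console.log(dynamicSlice(blob, 0)); // the slice handed to _outputFormatter as a SolidityParam value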
@@ -1793,13 +1793,13 @@ if (typeof XMLHttpRequest === 'undefined') {
/**
* Utils
- *
+ *
* @module utils
*/
/**
* Utility functions
- *
+ *
* @class [utils] config
* @constructor
*/
@@ -1866,7 +1866,7 @@ module.exports = {
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file sha3.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2015
@@ -2444,7 +2444,7 @@ module.exports={
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file web3.js
* @authors:
* Jeffrey Wilcke <jeff@ethdev.com>
@@ -2584,7 +2584,7 @@ module.exports = Web3;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file allevents.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2014
@@ -2674,7 +2674,7 @@ module.exports = AllSolidityEvents;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file batch.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2015
@@ -2719,7 +2719,7 @@ Batch.prototype.execute = function () {
requests[index].callback(null, (requests[index].format ? requests[index].format(result.result) : result.result));
}
});
- });
+ });
};
module.exports = Batch;
@@ -2742,7 +2742,7 @@ module.exports = Batch;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file contract.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2014
@@ -2804,7 +2804,7 @@ var addEventsToContract = function (contract) {
var All = new AllEvents(contract._eth._requestManager, events, contract.address);
All.attachToContract(contract);
-
+
events.map(function (json) {
return new SolidityEvent(contract._eth._requestManager, json, contract.address);
}).forEach(function (e) {
@@ -2832,7 +2832,7 @@ var checkForContractAddress = function(contract, callback){
// stop watching after 50 blocks (timeout)
if (count > 50) {
-
+
filter.stopWatching();
callbackFired = true;
@@ -2852,7 +2852,7 @@ var checkForContractAddress = function(contract, callback){
if(callbackFired || !code)
return;
-
+
filter.stopWatching();
callbackFired = true;
@@ -2910,7 +2910,7 @@ var ContractFactory = function (eth, abi) {
/**
* Should be called to create new contract on a blockchain
- *
+ *
* @method new
* @param {Any} contract constructor param1 (optional)
* @param {Any} contract constructor param2 (optional)
@@ -2976,14 +2976,14 @@ ContractFactory.prototype.new = function () {
ContractFactory.prototype.at = function (address, callback) {
var contract = new Contract(this.eth, this.abi, address);
- // this functions are not part of prototype,
+ // this functions are not part of prototype,
// because we dont want to spoil the interface
addFunctionsToContract(contract);
addEventsToContract(contract);
-
+
if (callback) {
callback(null, contract);
- }
+ }
return contract;
};
@@ -3041,7 +3041,7 @@ module.exports = ContractFactory;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file errors.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2015
@@ -3081,7 +3081,7 @@ module.exports = {
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file event.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2014
@@ -3152,7 +3152,7 @@ SolidityEvent.prototype.signature = function () {
/**
* Should be used to encode indexed params and options to one final object
- *
+ *
* @method encode
* @param {Object} indexed
* @param {Object} options
@@ -3183,7 +3183,7 @@ SolidityEvent.prototype.encode = function (indexed, options) {
if (value === undefined || value === null) {
return null;
}
-
+
if (utils.isArray(value)) {
return value.map(function (v) {
return '0x' + coder.encodeParam(i.type, v);
@@ -3205,17 +3205,17 @@ SolidityEvent.prototype.encode = function (indexed, options) {
* @return {Object} result object with decoded indexed && not indexed params
*/
SolidityEvent.prototype.decode = function (data) {
-
+
data.data = data.data || '';
data.topics = data.topics || [];
var argTopics = this._anonymous ? data.topics : data.topics.slice(1);
var indexedData = argTopics.map(function (topics) { return topics.slice(2); }).join("");
- var indexedParams = coder.decodeParams(this.types(true), indexedData);
+ var indexedParams = coder.decodeParams(this.types(true), indexedData);
var notIndexedData = data.data.slice(2);
var notIndexedParams = coder.decodeParams(this.types(false), notIndexedData);
-
+
var result = formatters.outputLogFormatter(data);
result.event = this.displayName();
result.address = data.address;
@@ -3250,7 +3250,7 @@ SolidityEvent.prototype.execute = function (indexed, options, callback) {
indexed = {};
}
}
-
+
var o = this.encode(indexed, options);
var formatter = this.decode.bind(this);
return new Filter(this._requestManager, o, watches.eth(), formatter, callback);
@@ -3311,7 +3311,7 @@ var extend = function (web3) {
}
};
- ex.formatters = formatters;
+ ex.formatters = formatters;
ex.utils = utils;
ex.Method = Method;
ex.Property = Property;
@@ -3380,7 +3380,7 @@ var getOptions = function (options) {
if (utils.isString(options)) {
return options;
- }
+ }
options = options || {};
@@ -3396,8 +3396,8 @@ var getOptions = function (options) {
to: options.to,
address: options.address,
fromBlock: formatters.inputBlockNumberFormatter(options.fromBlock),
- toBlock: formatters.inputBlockNumberFormatter(options.toBlock)
- };
+ toBlock: formatters.inputBlockNumberFormatter(options.toBlock)
+ };
};
/**
@@ -3405,7 +3405,7 @@ Adds the callback and sets up the methods, to iterate over the results.
@method getLogsAtStart
@param {Object} self
-@param {funciton}
+@param {funciton}
*/
var getLogsAtStart = function(self, callback){
// call getFilterLogs for the first watch callback start
@@ -3573,7 +3573,7 @@ module.exports = Filter;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file formatters.js
* @author Marek Kotewicz <marek@ethdev.com>
* @author Fabian Vogelsteller <fabian@ethdev.com>
@@ -3640,7 +3640,7 @@ var inputCallFormatter = function (options){
options[key] = utils.fromDecimal(options[key]);
});
- return options;
+ return options;
};
/**
@@ -3665,12 +3665,12 @@ var inputTransactionFormatter = function (options){
options[key] = utils.fromDecimal(options[key]);
});
- return options;
+ return options;
};
/**
* Formats the output of a transaction to its proper values
- *
+ *
* @method outputTransactionFormatter
* @param {Object} tx
* @returns {Object}
@@ -3689,7 +3689,7 @@ var outputTransactionFormatter = function (tx){
/**
* Formats the output of a transaction receipt to its proper values
- *
+ *
* @method outputTransactionReceiptFormatter
* @param {Object} receipt
* @returns {Object}
@@ -3715,7 +3715,7 @@ var outputTransactionReceiptFormatter = function (receipt){
* Formats the output of a block to its proper values
*
* @method outputBlockFormatter
- * @param {Object} block
+ * @param {Object} block
* @returns {Object}
*/
var outputBlockFormatter = function(block) {
@@ -3743,7 +3743,7 @@ var outputBlockFormatter = function(block) {
/**
* Formats the output of a log
- *
+ *
* @method outputLogFormatter
* @param {Object} log object
* @returns {Object} log
@@ -3784,7 +3784,7 @@ var inputPostFormatter = function(post) {
return (topic.indexOf('0x') === 0) ? topic : utils.fromUtf8(topic);
});
- return post;
+ return post;
};
/**
@@ -3836,6 +3836,8 @@ var outputSyncingFormatter = function(result) {
result.startingBlock = utils.toDecimal(result.startingBlock);
result.currentBlock = utils.toDecimal(result.currentBlock);
result.highestBlock = utils.toDecimal(result.highestBlock);
+ result.pulledStates = utils.toDecimal(result.pulledStates);
+ result.knownStates = utils.toDecimal(result.knownStates);
return result;
};
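A hypothetical eth_syncing result run through the same conversion, showing the two new state-download counters being decoded alongside the block numbers (toDecimal below is a stand-in for utils.toDecimal):

function toDecimal(hex) { return parseInt(hex, 16); }    // stand-in for utils.toDecimal
var result = {
    startingBlock: '0x384',
    currentBlock:  '0x386',
    highestBlock:  '0x454',
    pulledStates:  '0x1a',
    knownStates:   '0x64'
};
Object.keys(result).forEach(function (key) {
    result[key] = toDecimal(result[key]);
});
// { startingBlock: 900, currentBlock: 902, highestBlock: 1108, pulledStates: 26, knownStates: 100 }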
@@ -3971,8 +3973,8 @@ SolidityFunction.prototype.call = function () {
if (!callback) {
var output = this._eth.call(payload, defaultBlock);
return this.unpackOutput(output);
- }
-
+ }
+
var self = this;
this._eth.call(payload, defaultBlock, function (error, output) {
callback(error, self.unpackOutput(output));
@@ -4057,11 +4059,11 @@ SolidityFunction.prototype.request = function () {
var callback = this.extractCallback(args);
var payload = this.toPayload(args);
var format = this.unpackOutput.bind(this);
-
+
return {
method: this._constant ? 'eth_call' : 'eth_sendTransaction',
callback: callback,
- params: [payload],
+ params: [payload],
format: format
};
};
@@ -4193,7 +4195,7 @@ HttpProvider.prototype.send = function (payload) {
try {
result = JSON.parse(result);
} catch(e) {
- throw errors.InvalidResponse(request.responseText);
+ throw errors.InvalidResponse(request.responseText);
}
return result;
@@ -4207,7 +4209,7 @@ HttpProvider.prototype.send = function (payload) {
* @param {Function} callback triggered on end with (err, result)
*/
HttpProvider.prototype.sendAsync = function (payload, callback) {
- var request = this.prepareRequest(true);
+ var request = this.prepareRequest(true);
request.onreadystatechange = function() {
if (request.readyState === 4) {
@@ -4217,13 +4219,13 @@ HttpProvider.prototype.sendAsync = function (payload, callback) {
try {
result = JSON.parse(result);
} catch(e) {
- error = errors.InvalidResponse(request.responseText);
+ error = errors.InvalidResponse(request.responseText);
}
callback(error, result);
}
};
-
+
try {
request.send(JSON.stringify(payload));
} catch(error) {
@@ -4271,7 +4273,7 @@ module.exports = HttpProvider;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file iban.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2015
@@ -4471,7 +4473,7 @@ Iban.prototype.address = function () {
var base36 = this._iban.substr(4);
var asBn = new BigNumber(base36, 36);
return padLeft(asBn.toString(16), 20);
- }
+ }
return '';
};
@@ -4516,7 +4518,7 @@ var IpcProvider = function (path, net) {
var _this = this;
this.responseCallbacks = {};
this.path = path;
-
+
this.connection = net.connect({path: this.path});
this.connection.on('error', function(e){
@@ -4526,7 +4528,7 @@ var IpcProvider = function (path, net) {
this.connection.on('end', function(){
_this._timeout();
- });
+ });
// LISTEN FOR CONNECTION RESPONSES
@@ -4565,7 +4567,7 @@ Will parse the response and make an array out of it.
IpcProvider.prototype._parseResponse = function(data) {
var _this = this,
returnValues = [];
-
+
// DE-CHUNKER
var dechunkedData = data
.replace(/\}\{/g,'}|--|{') // }{
@@ -4669,7 +4671,7 @@ IpcProvider.prototype.send = function (payload) {
try {
result = JSON.parse(data);
} catch(e) {
- throw errors.InvalidResponse(data);
+ throw errors.InvalidResponse(data);
}
return result;
@@ -4850,7 +4852,7 @@ Method.prototype.extractCallback = function (args) {
/**
* Should be called to check if the number of arguments is correct
- *
+ *
* @method validateArgs
* @param {Array} arguments
* @throws {Error} if it is not
@@ -4863,7 +4865,7 @@ Method.prototype.validateArgs = function (args) {
/**
* Should be called to format input args of method
- *
+ *
* @method formatInput
* @param {Array}
* @return {Array}
@@ -4917,7 +4919,7 @@ Method.prototype.attachToObject = function (obj) {
obj[name[0]] = obj[name[0]] || {};
obj[name[0]][name[1]] = func;
} else {
- obj[name[0]] = func;
+ obj[name[0]] = func;
}
};
@@ -4981,8 +4983,8 @@ var DB = function (web3) {
this._requestManager = web3._requestManager;
var self = this;
-
- methods().forEach(function(method) {
+
+ methods().forEach(function(method) {
method.attachToObject(self);
method.setRequestManager(web3._requestManager);
});
@@ -5084,12 +5086,12 @@ function Eth(web3) {
var self = this;
- methods().forEach(function(method) {
+ methods().forEach(function(method) {
method.attachToObject(self);
method.setRequestManager(self._requestManager);
});
- properties().forEach(function(p) {
+ properties().forEach(function(p) {
p.attachToObject(self);
p.setRequestManager(self._requestManager);
});
@@ -5388,7 +5390,7 @@ var Net = function (web3) {
var self = this;
- properties().forEach(function(p) {
+ properties().forEach(function(p) {
p.attachToObject(self);
p.setRequestManager(web3._requestManager);
});
@@ -5444,7 +5446,7 @@ var Shh = function (web3) {
var self = this;
- methods().forEach(function(method) {
+ methods().forEach(function(method) {
method.attachToObject(self);
method.setRequestManager(self._requestManager);
});
@@ -5454,11 +5456,11 @@ Shh.prototype.filter = function (fil, callback) {
return new Filter(this._requestManager, fil, watches.shh(), formatters.outputPostFormatter, callback);
};
-var methods = function () {
+var methods = function () {
var post = new Method({
- name: 'post',
- call: 'shh_post',
+ name: 'post',
+ call: 'shh_post',
params: 1,
inputFormatter: [formatters.inputPostFormatter]
});
@@ -5632,7 +5634,7 @@ module.exports = {
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file namereg.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2015
@@ -5697,7 +5699,7 @@ Property.prototype.setRequestManager = function (rm) {
/**
* Should be called to format input args of method
- *
+ *
* @method formatInput
* @param {Array}
* @return {Array}
@@ -5733,7 +5735,7 @@ Property.prototype.extractCallback = function (args) {
/**
* Should attach function to method
- *
+ *
* @method attachToObject
* @param {Object}
* @param {Function}
@@ -5766,7 +5768,7 @@ Property.prototype.buildGet = function () {
return function get() {
return property.formatOutput(property.requestManager.send({
method: property.getter
- }));
+ }));
};
};
@@ -5820,7 +5822,7 @@ module.exports = Property;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file requestmanager.js
* @author Jeffrey Wilcke <jeff@ethdev.com>
* @author Marek Kotewicz <marek@ethdev.com>
@@ -5887,7 +5889,7 @@ RequestManager.prototype.sendAsync = function (data, callback) {
if (err) {
return callback(err);
}
-
+
if (!Jsonrpc.getInstance().isValidResponse(result)) {
return callback(errors.InvalidResponse(result));
}
@@ -5920,7 +5922,7 @@ RequestManager.prototype.sendBatch = function (data, callback) {
}
callback(err, results);
- });
+ });
};
/**
@@ -6024,7 +6026,7 @@ RequestManager.prototype.poll = function () {
}
var payload = Jsonrpc.getInstance().toBatchPayload(pollsData);
-
+
// map the request id to they poll id
var pollsIdMap = {};
payload.forEach(function(load, index){
@@ -6054,7 +6056,7 @@ RequestManager.prototype.poll = function () {
} else
return false;
}).filter(function (result) {
- return !!result;
+ return !!result;
}).filter(function (result) {
var valid = Jsonrpc.getInstance().isValidResponse(result);
if (!valid) {
@@ -6129,16 +6131,16 @@ var pollSyncing = function(self) {
self.callbacks.forEach(function (callback) {
if (self.lastSyncState !== sync) {
-
+
// call the callback with true first so the app can stop anything, before receiving the sync data
if(!self.lastSyncState && utils.isObject(sync))
callback(null, true);
-
+
// call on the next CPU cycle, so the actions of the sync stop can be processes first
setTimeout(function() {
callback(null, sync);
}, 0);
-
+
self.lastSyncState = sync;
}
});
@@ -6193,7 +6195,7 @@ module.exports = IsSyncing;
You should have received a copy of the GNU Lesser General Public License
along with web3.js. If not, see <http://www.gnu.org/licenses/>.
*/
-/**
+/**
* @file transfer.js
* @author Marek Kotewicz <marek@ethdev.com>
* @date 2015
@@ -6212,7 +6214,7 @@ var exchangeAbi = require('../contracts/SmartExchange.json');
* @param {Function} callback, callback
*/
var transfer = function (eth, from, to, value, callback) {
- var iban = new Iban(to);
+ var iban = new Iban(to);
if (!iban.isValid()) {
throw new Error('invalid iban address');
}
@@ -6220,7 +6222,7 @@ var transfer = function (eth, from, to, value, callback) {
if (iban.isDirect()) {
return transferToAddress(eth, from, iban.address(), value, callback);
}
-
+
if (!callback) {
var address = eth.icapNamereg().addr(iban.institution());
return deposit(eth, from, address, value, iban.client());
@@ -6229,7 +6231,7 @@ var transfer = function (eth, from, to, value, callback) {
eth.icapNamereg().addr(iban.institution(), function (err, address) {
return deposit(eth, from, address, value, iban.client(), callback);
});
-
+
};
/**
diff --git a/miner/api.go b/miner/api.go
deleted file mode 100644
index fae2203f5..000000000
--- a/miner/api.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of go-ethereum.
-//
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// go-ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
-
-package miner
-
-import (
- "fmt"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/logger/glog"
- "github.com/ethereum/go-ethereum/rpc"
-)
-
-// PublicMinerAPI provides an API to control the miner.
-// It offers only methods that operate on data that pose no security risk when it is publicly accessible.
-type PublicMinerAPI struct {
- miner *Miner
- agent *RemoteAgent
-}
-
-// NewPublicMinerAPI create a new PublicMinerAPI instance.
-func NewPublicMinerAPI(miner *Miner) *PublicMinerAPI {
- agent := NewRemoteAgent()
- miner.Register(agent)
-
- return &PublicMinerAPI{miner, agent}
-}
-
-// Mining returns an indication if this node is currently mining.
-func (s *PublicMinerAPI) Mining() bool {
- return s.miner.Mining()
-}
-
-// SubmitWork can be used by external miner to submit their POW solution. It returns an indication if the work was
-// accepted. Note, this is not an indication if the provided work was valid!
-func (s *PublicMinerAPI) SubmitWork(nonce rpc.HexNumber, solution, digest common.Hash) bool {
- return s.agent.SubmitWork(nonce.Uint64(), digest, solution)
-}
-
-// GetWork returns a work package for external miner. The work package consists of 3 strings
-// result[0], 32 bytes hex encoded current block header pow-hash
-// result[1], 32 bytes hex encoded seed hash used for DAG
-// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
-func (s *PublicMinerAPI) GetWork() ([]string, error) {
- if !s.Mining() {
- s.miner.Start(s.miner.coinbase, 0)
- }
- if work, err := s.agent.GetWork(); err == nil {
- return work[:], nil
- } else {
- glog.Infof("%v\n", err)
- }
- return nil, fmt.Errorf("mining not ready")
-}
-
-// SubmitHashrate can be used for remote miners to submit their hash rate. This enables the node to report the combined
-// hash rate of all miners which submit work through this node. It accepts the miner hash rate and an identifier which
-// must be unique between nodes.
-func (s *PublicMinerAPI) SubmitHashrate(hashrate rpc.HexNumber, id common.Hash) bool {
- s.agent.SubmitHashrate(id, hashrate.Uint64())
- return true
-}