Diffstat (limited to 'vendor/github.com')
164 files changed, 348940 insertions, 2 deletions
diff --git a/vendor/github.com/Spiderpowa/bls/lib/libbls384.a b/vendor/github.com/Spiderpowa/bls/lib/libbls384.a Binary files differdeleted file mode 100644 index 9e048acda..000000000 --- a/vendor/github.com/Spiderpowa/bls/lib/libbls384.a +++ /dev/null diff --git a/vendor/github.com/dexon-foundation/bls/Makefile b/vendor/github.com/dexon-foundation/bls/Makefile new file mode 100644 index 000000000..8ce73a5a1 --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/Makefile @@ -0,0 +1,140 @@ +include ../mcl/common.mk +LIB_DIR=lib +OBJ_DIR=obj +EXE_DIR=bin +CFLAGS += -std=c++11 +LDFLAGS += -lpthread + +SRC_SRC=bls_c256.cpp bls_c384.cpp +TEST_SRC=bls256_test.cpp bls384_test.cpp bls_c256_test.cpp bls_c384_test.cpp +SAMPLE_SRC=bls256_smpl.cpp bls384_smpl.cpp + +CFLAGS+=-I../mcl/include -I./ +ifneq ($(MCL_MAX_BIT_SIZE),) + CFLAGS+=-DMCL_MAX_BIT_SIZE=$(MCL_MAX_BIT_SIZE) +endif +ifeq ($(DISABLE_THREAD_TEST),1) + CFLAGS+=-DDISABLE_THREAD_TEST +endif + +SHARE_BASENAME_SUF?=_dy + +BLS256_LIB=$(LIB_DIR)/libbls256.a +BLS384_LIB=$(LIB_DIR)/libbls384.a +BLS256_SNAME=bls256$(SHARE_BASENAME_SUF) +BLS384_SNAME=bls384$(SHARE_BASENAME_SUF) +BLS256_SLIB=$(LIB_DIR)/lib$(BLS256_SNAME).$(LIB_SUF) +BLS384_SLIB=$(LIB_DIR)/lib$(BLS384_SNAME).$(LIB_SUF) +all: $(BLS256_LIB) $(BLS256_SLIB) $(BLS384_LIB) $(BLS384_SLIB) + +MCL_LIB=../mcl/lib/libmcl.a + +$(MCL_LIB): + $(MAKE) -C ../mcl lib/libmcl.a + +ifeq ($(DOCKER),alpine) +GMP_PREFIX=/usr/lib +OPENSSL_PREFIX=/usr/lib +else ifeq ($(OS),mac) +GMP_PREFIX=$(shell brew --prefix gmp)/lib +OPENSSL_PREFIX=$(shell brew --prefix openssl)/lib +else +GMP_PREFIX=/usr/lib/x86_64-linux-gnu +OPENSSL_PREFIX=/usr/lib/x86_64-linux-gnu +endif + +GMP_STATIC_LIB=$(GMP_PREFIX)/libgmp.a +GMPXX_STATIC_LIB=$(GMP_PREFIX)/libgmpxx.a + +OPENSSL_STATIC_LIB=$(OPENSSL_PREFIX)/libcrypto.a + +$(BLS256_LIB): $(OBJ_DIR)/bls_c256.o + $(AR) $@ $< +$(BLS384_LIB): $(OBJ_DIR)/bls_c384.o $(MCL_LIB) + rm -rf tmp + mkdir -p tmp + cd tmp && \ + ar x ../$(MCL_LIB) && \ + ar x $(OPENSSL_STATIC_LIB) && \ + ar x $(GMP_STATIC_LIB) && \ + ar x $(GMPXX_STATIC_LIB) + $(AR) $@ $< tmp/*.o + rm -rf tmp + +ifneq ($(findstring $(OS),mac/mingw64),) + BLS256_SLIB_LDFLAGS+=-lgmpxx -lgmp -lcrypto -lstdc++ + BLS384_SLIB_LDFLAGS+=-lgmpxx -lgmp -lcrypto -lstdc++ +endif +ifeq ($(OS),mingw64) + CFLAGS+=-I../mcl + BLS256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BLS256_SNAME).a + BLS384_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BLS384_SNAME).a +endif +$(BLS256_SLIB): $(OBJ_DIR)/bls_c256.o $(MCL_LIB) + $(PRE)$(CXX) -shared -o $@ $< $(MCL_LIB) $(BLS256_SLIB_LDFLAGS) +$(BLS384_SLIB): $(OBJ_DIR)/bls_c384.o $(MCL_LIB) + $(PRE)$(CXX) -shared -o $@ $< $(MCL_LIB) $(BLS384_SLIB_LDFLAGS) + +VPATH=test sample src + +.SUFFIXES: .cpp .d .exe + +$(OBJ_DIR)/%.o: %.cpp + $(PRE)$(CXX) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) + +$(EXE_DIR)/%384_test.exe: $(OBJ_DIR)/%384_test.o $(BLS384_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BLS384_LIB) -lmcl -L../mcl/lib $(LDFLAGS) + +$(EXE_DIR)/%256_test.exe: $(OBJ_DIR)/%256_test.o $(BLS256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BLS256_LIB) -lmcl -L../mcl/lib $(LDFLAGS) + +# sample exe links libbls256.a +$(EXE_DIR)/%.exe: $(OBJ_DIR)/%.o $(BLS256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BLS256_LIB) -lmcl -L../mcl/lib $(LDFLAGS) + +SAMPLE_EXE=$(addprefix $(EXE_DIR)/,$(SAMPLE_SRC:.cpp=.exe)) +sample: $(SAMPLE_EXE) + +TEST_EXE=$(addprefix $(EXE_DIR)/,$(TEST_SRC:.cpp=.exe)) +test: $(TEST_EXE) + @echo test $(TEST_EXE) + @sh -ec 'for i in $(TEST_EXE); do $$i|grep "ctest:name"; done' > result.txt + 
@grep -v "ng=0, exception=0" result.txt; if [ $$? -eq 1 ]; then echo "all unit tests succeed"; else exit 1; fi + $(MAKE) sample_test + +sample_test: $(EXE_DIR)/bls_smpl.exe + python bls_smpl.py + +ifeq ($(OS),mac) + MAC_GO_LDFLAGS="-ldflags=-s" +endif +# PATH is for mingw, LD_RUN_PATH is for linux, DYLD_LIBRARY_PATH is for mac +test_go: ffi/go/bls/bls.go ffi/go/bls/bls_test.go $(BLS384_LIB) + cd ffi/go/bls && go test $(MAC_GO_LDFLAGS) . + +EMCC_OPT=-I./include -I./src -I../mcl/include -I./ -Wall -Wextra +EMCC_OPT+=-O3 -DNDEBUG +EMCC_OPT+=-s WASM=1 -s NO_EXIT_RUNTIME=1 -s MODULARIZE=1 #-s ASSERTIONS=1 +EMCC_OPT+=-DCYBOZU_MINIMUM_EXCEPTION +EMCC_OPT+=-s ABORTING_MALLOC=0 +EMCC_OPT+=-DMCLBN_FP_UNIT_SIZE=6 +JS_DEP=src/bls_c384.cpp ../mcl/src/fp.cpp Makefile + +../bls-wasm/bls_c.js: $(JS_DEP) + emcc -o $@ src/bls_c384.cpp ../mcl/src/fp.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=384 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -DMCL_DONT_USE_CSPRNG -fno-exceptions -MD -MP -MF obj/bls_c384.d + +bls-wasm: + $(MAKE) ../bls-wasm/bls_c.js + +clean: + $(RM) $(OBJ_DIR)/*.d $(OBJ_DIR)/*.o $(EXE_DIR)/*.exe $(GEN_EXE) $(ASM_SRC) $(ASM_OBJ) $(LLVM_SRC) $(BLS256_LIB) $(BLS256_SLIB) $(BLS384_LIB) $(BLS384_SLIB) + +ALL_SRC=$(SRC_SRC) $(TEST_SRC) $(SAMPLE_SRC) +DEPEND_FILE=$(addprefix $(OBJ_DIR)/, $(ALL_SRC:.cpp=.d)) +-include $(DEPEND_FILE) + +.PHONY: test bls-wasm + +# don't remove these files automatically +.SECONDARY: $(addprefix $(OBJ_DIR)/, $(ALL_SRC:.cpp=.o)) + diff --git a/vendor/github.com/dexon-foundation/bls/bls.sln b/vendor/github.com/dexon-foundation/bls/bls.sln new file mode 100644 index 000000000..4889ec601 --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/bls.sln @@ -0,0 +1,30 @@ +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 2013 +VisualStudioVersion = 12.0.40629.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bls_test", "test\proj\bls_test\bls_test.vcxproj", "{51266DE6-B57B-4AE3-B85C-282F170E1728}" + ProjectSection(ProjectDependencies) = postProject + {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bls", "src\proj\bls.vcxproj", "{1DBB979A-C212-45CD-9563-446A96F87F71}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.ActiveCfg = Debug|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.Build.0 = Debug|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.ActiveCfg = Release|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.Build.0 = Release|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.ActiveCfg = Debug|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.Build.0 = Debug|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.ActiveCfg = Release|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/vendor/github.com/dexon-foundation/bls/bls_smpl.py b/vendor/github.com/dexon-foundation/bls/bls_smpl.py new file mode 100644 index 000000000..f834d80aa --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/bls_smpl.py @@ -0,0 +1,40 @@ 
+import os, sys, subprocess + +EXE='bin/bls_smpl.exe' + +def init(): + subprocess.check_call([EXE, "init"]) + +def sign(m, i=0): + subprocess.check_call([EXE, "sign", "-m", m, "-id", str(i)]) + +def verify(m, i=0): + subprocess.check_call([EXE, "verify", "-m", m, "-id", str(i)]) + +def share(n, k): + subprocess.check_call([EXE, "share", "-n", str(n), "-k", str(k)]) + +def recover(ids): + cmd = [EXE, "recover", "-ids"] + for i in ids: + cmd.append(str(i)) + subprocess.check_call(cmd) + +def main(): + m = "hello bls threshold signature" + n = 10 + ids = [1, 5, 3, 7] + k = len(ids) + init() + sign(m) + verify(m) + share(n, k) + for i in ids: + sign(m, i) + verify(m, i) + subprocess.check_call(["rm", "sample/sign.txt"]) + recover(ids) + verify(m) + +if __name__ == '__main__': + main() diff --git a/vendor/github.com/dexon-foundation/bls/common.props b/vendor/github.com/dexon-foundation/bls/common.props new file mode 100644 index 000000000..d6fdbb902 --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/common.props @@ -0,0 +1,26 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ImportGroup Label="PropertySheets" /> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup> + <OutDir>$(SolutionDir)bin\</OutDir> + </PropertyGroup> + <ItemDefinitionGroup> + <ClCompile> + <AdditionalIncludeDirectories>$(SolutionDir)../cybozulib/include;$(SolutionDir)../cybozulib_ext/include;$(SolutionDir)include;$(SolutionDir)../mcl/include</AdditionalIncludeDirectories> + </ClCompile> + </ItemDefinitionGroup> + <ItemDefinitionGroup> + <ClCompile> + <WarningLevel>Level4</WarningLevel> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <PrecompiledHeaderFile /> + <PrecompiledHeaderOutputFile /> + <PreprocessorDefinitions>_MBCS;%(PreprocessorDefinitions);NOMINMAX;BLS_MAX_OP_UNIT_SIZE=6</PreprocessorDefinitions> + </ClCompile> + <Link> + <AdditionalLibraryDirectories>$(SolutionDir)../cybozulib_ext/lib;$(SolutionDir)../mcl/lib;$(SolutionDir)lib</AdditionalLibraryDirectories> + </Link> + </ItemDefinitionGroup> + <ItemGroup /> +</Project> diff --git a/vendor/github.com/dexon-foundation/bls/debug.props b/vendor/github.com/dexon-foundation/bls/debug.props new file mode 100644 index 000000000..1553ae0dc --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/debug.props @@ -0,0 +1,14 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ImportGroup Label="PropertySheets" /> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup> + <TargetName>$(ProjectName)d</TargetName> + </PropertyGroup> + <ItemDefinitionGroup> + <ClCompile> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + </ItemDefinitionGroup> + <ItemGroup /> +</Project>
\ No newline at end of file diff --git a/vendor/github.com/Spiderpowa/bls/ffi/go/bls/bls.go b/vendor/github.com/dexon-foundation/bls/ffi/go/bls/bls.go index 4d79430a5..b6ffe49d0 100644 --- a/vendor/github.com/Spiderpowa/bls/ffi/go/bls/bls.go +++ b/vendor/github.com/dexon-foundation/bls/ffi/go/bls/bls.go @@ -15,7 +15,7 @@ import "encoding/json" // call this function before calling all the other operations // this function is not thread safe func Init(curve int) error { - err := C.blsInit(C.int(curve), C.MCLBN_FP_UNIT_SIZE) + err := C.blsInit(C.int(curve), C.MCLBN_COMPILED_TIME_VAR) if err != 0 { return fmt.Errorf("ERR Init curve=%d", curve) } @@ -112,6 +112,16 @@ func (sec *SecretKey) SetLittleEndian(buf []byte) error { return sec.v.SetLittleEndian(buf) } +// SerializeToHexStr -- +func (sec *SecretKey) SerializeToHexStr() string { + return sec.v.GetString(IoSerializeHexStr) +} + +// DeserializeHexStr -- +func (sec *SecretKey) DeserializeHexStr(s string) error { + return sec.v.SetString(s, IoSerializeHexStr) +} + // GetHexString -- func (sec *SecretKey) GetHexString() string { return sec.v.GetString(16) @@ -230,6 +240,16 @@ func (pub *PublicKey) Deserialize(buf []byte) error { return pub.v.Deserialize(buf) } +// SerializeToHexStr -- +func (pub *PublicKey) SerializeToHexStr() string { + return pub.v.GetString(IoSerializeHexStr) +} + +// DeserializeHexStr -- +func (pub *PublicKey) DeserializeHexStr(s string) error { + return pub.v.SetString(s, IoSerializeHexStr) +} + // GetHexString -- func (pub *PublicKey) GetHexString() string { return pub.v.GetString(16) @@ -306,6 +326,16 @@ func (sign *Sign) Deserialize(buf []byte) error { return sign.v.Deserialize(buf) } +// SerializeToHexStr -- +func (sign *Sign) SerializeToHexStr() string { + return sign.v.GetString(IoSerializeHexStr) +} + +// DeserializeHexStr -- +func (sign *Sign) DeserializeHexStr(s string) error { + return sign.v.SetString(s, IoSerializeHexStr) +} + // GetHexString -- func (sign *Sign) GetHexString() string { return sign.v.GetString(16) diff --git a/vendor/github.com/Spiderpowa/bls/ffi/go/bls/mcl.go b/vendor/github.com/dexon-foundation/bls/ffi/go/bls/mcl.go index 713a8cc55..9771201ec 100644 --- a/vendor/github.com/Spiderpowa/bls/ffi/go/bls/mcl.go +++ b/vendor/github.com/dexon-foundation/bls/ffi/go/bls/mcl.go @@ -20,6 +20,9 @@ const CurveFp382_2 = C.mclBn_CurveFp382_2 // BLS12_381 const BLS12_381 = C.MCL_BLS12_381 +// IoSerializeHexStr +const IoSerializeHexStr = C.MCLBN_IO_SERIALIZE_HEX_STR + // GetMaxOpUnitSize -- func GetMaxOpUnitSize() int { return int(C.MCLBN_FP_UNIT_SIZE) diff --git a/vendor/github.com/dexon-foundation/bls/include/bls/bls.h b/vendor/github.com/dexon-foundation/bls/include/bls/bls.h new file mode 100644 index 000000000..6b7cbdd68 --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/include/bls/bls.h @@ -0,0 +1,230 @@ +#pragma once +/** + @file + @brief C interface of bls.hpp + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <mcl/bn.h> + +#ifdef _MSC_VER + #ifdef BLS_DONT_EXPORT + #define BLS_DLL_API + #else + #ifdef BLS_DLL_EXPORT + #define BLS_DLL_API __declspec(dllexport) + #else + #define BLS_DLL_API __declspec(dllimport) + #endif + #endif + #ifndef BLS_NO_AUTOLINK + #if MCLBN_FP_UNIT_SIZE == 4 + #pragma comment(lib, "bls256.lib") + #elif MCLBN_FP_UNIT_SIZE == 6 + #pragma comment(lib, "bls384.lib") + #endif + #endif +#elif defined(__EMSCRIPTEN__) && !defined(BLS_DONT_EXPORT) + #define BLS_DLL_API __attribute__((used)) 
+#elif defined(__wasm__) && !defined(BLS_DONT_EXPORT) + #define BLS_DLL_API __attribute__((visibility("default"))) +#else + #define BLS_DLL_API +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + mclBnFr v; +} blsId; + +typedef struct { + mclBnFr v; +} blsSecretKey; + +typedef struct { + mclBnG2 v; +} blsPublicKey; + +typedef struct { + mclBnG1 v; +} blsSignature; + +/* + initialize this library + call this once before using the other functions + @param curve [in] enum value defined in mcl/bn.h + @param compiledTimeVar [in] specify MCLBN_COMPILED_TIME_VAR, + which macro is used to make sure that the values + are the same when the library is built and used + @return 0 if success + @note blsInit() is thread safe and serialized if it is called simultaneously + but don't call it while using other functions. +*/ +BLS_DLL_API int blsInit(int curve, int compiledTimeVar); + +BLS_DLL_API void blsIdSetInt(blsId *id, int x); + +// return 0 if success +// mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r +BLS_DLL_API int blsSecretKeySetLittleEndian(blsSecretKey *sec, const void *buf, mclSize bufSize); + +BLS_DLL_API void blsGetPublicKey(blsPublicKey *pub, const blsSecretKey *sec); + +// calculate the has of m and sign the hash +BLS_DLL_API void blsSign(blsSignature *sig, const blsSecretKey *sec, const void *m, mclSize size); + +// return 1 if valid +BLS_DLL_API int blsVerify(const blsSignature *sig, const blsPublicKey *pub, const void *m, mclSize size); + +// return written byte size if success else 0 +BLS_DLL_API mclSize blsIdSerialize(void *buf, mclSize maxBufSize, const blsId *id); +BLS_DLL_API mclSize blsSecretKeySerialize(void *buf, mclSize maxBufSize, const blsSecretKey *sec); +BLS_DLL_API mclSize blsPublicKeySerialize(void *buf, mclSize maxBufSize, const blsPublicKey *pub); +BLS_DLL_API mclSize blsSignatureSerialize(void *buf, mclSize maxBufSize, const blsSignature *sig); + +// return read byte size if success else 0 +BLS_DLL_API mclSize blsIdDeserialize(blsId *id, const void *buf, mclSize bufSize); +BLS_DLL_API mclSize blsSecretKeyDeserialize(blsSecretKey *sec, const void *buf, mclSize bufSize); +BLS_DLL_API mclSize blsPublicKeyDeserialize(blsPublicKey *pub, const void *buf, mclSize bufSize); +BLS_DLL_API mclSize blsSignatureDeserialize(blsSignature *sig, const void *buf, mclSize bufSize); + +// return 1 if same else 0 +BLS_DLL_API int blsIdIsEqual(const blsId *lhs, const blsId *rhs); +BLS_DLL_API int blsSecretKeyIsEqual(const blsSecretKey *lhs, const blsSecretKey *rhs); +BLS_DLL_API int blsPublicKeyIsEqual(const blsPublicKey *lhs, const blsPublicKey *rhs); +BLS_DLL_API int blsSignatureIsEqual(const blsSignature *lhs, const blsSignature *rhs); + +// return 0 if success +BLS_DLL_API int blsSecretKeyShare(blsSecretKey *sec, const blsSecretKey* msk, mclSize k, const blsId *id); +BLS_DLL_API int blsPublicKeyShare(blsPublicKey *pub, const blsPublicKey *mpk, mclSize k, const blsId *id); + +BLS_DLL_API int blsSecretKeyRecover(blsSecretKey *sec, const blsSecretKey *secVec, const blsId *idVec, mclSize n); +BLS_DLL_API int blsPublicKeyRecover(blsPublicKey *pub, const blsPublicKey *pubVec, const blsId *idVec, mclSize n); +BLS_DLL_API int blsSignatureRecover(blsSignature *sig, const blsSignature *sigVec, const blsId *idVec, mclSize n); + +// add +BLS_DLL_API void blsSecretKeyAdd(blsSecretKey *sec, const blsSecretKey *rhs); +BLS_DLL_API void blsPublicKeyAdd(blsPublicKey *pub, const blsPublicKey *rhs); +BLS_DLL_API void blsSignatureAdd(blsSignature *sig, const blsSignature *rhs); + 
+/* + verify whether a point of an elliptic curve has order r + This api affetcs setStr(), deserialize() for G2 on BN or G1/G2 on BLS12 + @param doVerify [in] does not verify if zero(default 1) + Signature = G1, PublicKey = G2 +*/ +BLS_DLL_API void blsSignatureVerifyOrder(int doVerify); +BLS_DLL_API void blsPublicKeyVerifyOrder(int doVerify); +// deserialize under VerifyOrder(true) = deserialize under VerifyOrder(false) + IsValidOrder +BLS_DLL_API int blsSignatureIsValidOrder(const blsSignature *sig); +BLS_DLL_API int blsPublicKeyIsValidOrder(const blsPublicKey *pub); + +#ifndef BLS_MINIMUM_API + +/* + sign the hash + use the low (bitSize of r) - 1 bit of h + return 0 if success else -1 + NOTE : return false if h is zero or c1 or -c1 value for BN254. see hashTest() in test/bls_test.hpp +*/ +BLS_DLL_API int blsSignHash(blsSignature *sig, const blsSecretKey *sec, const void *h, mclSize size); +// return 1 if valid +BLS_DLL_API int blsVerifyHash(const blsSignature *sig, const blsPublicKey *pub, const void *h, mclSize size); + +/* + verify aggSig with pubVec[0, n) and hVec[0, n) + e(aggSig, Q) = prod_i e(hVec[i], pubVec[i]) + return 1 if valid + @note do not check duplication of hVec +*/ +BLS_DLL_API int blsVerifyAggregatedHashes(const blsSignature *aggSig, const blsPublicKey *pubVec, const void *hVec, size_t sizeofHash, mclSize n); + +// sub +BLS_DLL_API void blsSecretKeySub(blsSecretKey *sec, const blsSecretKey *rhs); +BLS_DLL_API void blsPublicKeySub(blsPublicKey *pub, const blsPublicKey *rhs); +BLS_DLL_API void blsSignatureSub(blsSignature *sig, const blsSignature *rhs); + +// not thread safe version (old blsInit) +BLS_DLL_API int blsInitNotThreadSafe(int curve, int compiledTimeVar); + +BLS_DLL_API mclSize blsGetOpUnitSize(void); +// return strlen(buf) if success else 0 +BLS_DLL_API int blsGetCurveOrder(char *buf, mclSize maxBufSize); +BLS_DLL_API int blsGetFieldOrder(char *buf, mclSize maxBufSize); + +// return bytes for serialized G1(=Fp) +BLS_DLL_API int blsGetG1ByteSize(void); + +// return bytes for serialized Fr +BLS_DLL_API int blsGetFrByteSize(void); + +// get a generator of G2 +BLS_DLL_API void blsGetGeneratorOfG2(blsPublicKey *pub); + +// return 0 if success +BLS_DLL_API int blsIdSetDecStr(blsId *id, const char *buf, mclSize bufSize); +BLS_DLL_API int blsIdSetHexStr(blsId *id, const char *buf, mclSize bufSize); + +/* + return strlen(buf) if success else 0 + buf is '\0' terminated +*/ +BLS_DLL_API mclSize blsIdGetDecStr(char *buf, mclSize maxBufSize, const blsId *id); +BLS_DLL_API mclSize blsIdGetHexStr(char *buf, mclSize maxBufSize, const blsId *id); + +// hash buf and set +BLS_DLL_API int blsHashToSecretKey(blsSecretKey *sec, const void *buf, mclSize bufSize); +#ifndef MCL_DONT_USE_CSPRNG +/* + set secretKey if system has /dev/urandom or CryptGenRandom + return 0 if success else -1 +*/ +BLS_DLL_API int blsSecretKeySetByCSPRNG(blsSecretKey *sec); +#endif + +BLS_DLL_API void blsGetPop(blsSignature *sig, const blsSecretKey *sec); + +BLS_DLL_API int blsVerifyPop(const blsSignature *sig, const blsPublicKey *pub); +////////////////////////////////////////////////////////////////////////// +// the following apis will be removed + +// mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r +BLS_DLL_API int blsIdSetLittleEndian(blsId *id, const void *buf, mclSize bufSize); +/* + return written byte size if success else 0 +*/ +BLS_DLL_API mclSize blsIdGetLittleEndian(void *buf, mclSize maxBufSize, const blsId *id); + +// return 0 if success +BLS_DLL_API int blsSecretKeySetDecStr(blsSecretKey 
*sec, const char *buf, mclSize bufSize); +BLS_DLL_API int blsSecretKeySetHexStr(blsSecretKey *sec, const char *buf, mclSize bufSize); +/* + return written byte size if success else 0 +*/ +BLS_DLL_API mclSize blsSecretKeyGetLittleEndian(void *buf, mclSize maxBufSize, const blsSecretKey *sec); +/* + return strlen(buf) if success else 0 + buf is '\0' terminated +*/ +BLS_DLL_API mclSize blsSecretKeyGetDecStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec); +BLS_DLL_API mclSize blsSecretKeyGetHexStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec); +BLS_DLL_API int blsPublicKeySetHexStr(blsPublicKey *pub, const char *buf, mclSize bufSize); +BLS_DLL_API mclSize blsPublicKeyGetHexStr(char *buf, mclSize maxBufSize, const blsPublicKey *pub); +BLS_DLL_API int blsSignatureSetHexStr(blsSignature *sig, const char *buf, mclSize bufSize); +BLS_DLL_API mclSize blsSignatureGetHexStr(char *buf, mclSize maxBufSize, const blsSignature *sig); + +/* + Diffie Hellman key exchange + out = sec * pub +*/ +BLS_DLL_API void blsDHKeyExchange(blsPublicKey *out, const blsSecretKey *sec, const blsPublicKey *pub); + +#endif // BLS_MINIMUM_API + +#ifdef __cplusplus +} +#endif diff --git a/vendor/github.com/dexon-foundation/bls/include/bls/bls.hpp b/vendor/github.com/dexon-foundation/bls/include/bls/bls.hpp new file mode 100644 index 000000000..b32b7e1fa --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/include/bls/bls.hpp @@ -0,0 +1,508 @@ +#pragma once +/** + @file + @brief BLS threshold signature on BN curve + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <bls/bls.h> +#include <stdexcept> +#include <vector> +#include <string> +#include <iosfwd> +#include <stdint.h> + +namespace bls { + +// same value with IoMode of mcl/op.hpp +enum { + IoBin = 2, // binary number + IoDec = 10, // decimal number + IoHex = 16, // hexadecimal number + IoPrefix = 128, // append '0b'(bin) or '0x'(hex) + IoSerialize = 512, + IoFixedByteSeq = IoSerialize // fixed byte representation +}; + +/* + BLS signature + e : G2 x G1 -> Fp12 + Q in G2 ; fixed global parameter + H : {str} -> G1 + s : secret key + sQ ; public key + s H(m) ; signature of m + verify ; e(sQ, H(m)) = e(Q, s H(m)) +*/ + +/* + initialize this library + call this once before using the other method + @param curve [in] type of curve + @param compiledTimevar [in] use the default value + @note init() is not thread safe +*/ +inline void init(int curve = mclBn_CurveFp254BNb, int compiledTimeVar = MCLBN_COMPILED_TIME_VAR) +{ + if (blsInit(curve, compiledTimeVar) != 0) throw std::invalid_argument("blsInit"); +} +inline size_t getOpUnitSize() { return blsGetOpUnitSize(); } + +inline void getCurveOrder(std::string& str) +{ + str.resize(1024); + mclSize n = blsGetCurveOrder(&str[0], str.size()); + if (n == 0) throw std::runtime_error("blsGetCurveOrder"); + str.resize(n); +} +inline void getFieldOrder(std::string& str) +{ + str.resize(1024); + mclSize n = blsGetFieldOrder(&str[0], str.size()); + if (n == 0) throw std::runtime_error("blsGetFieldOrder"); + str.resize(n); +} +inline int getG1ByteSize() { return blsGetG1ByteSize(); } +inline int getFrByteSize() { return blsGetFrByteSize(); } + +namespace local { +/* + the value of secretKey and Id must be less than + r = 0x2523648240000001ba344d8000000007ff9f800000000010a10000000000000d + sizeof(uint64_t) * keySize byte +*/ +const size_t keySize = MCLBN_FP_UNIT_SIZE; +} + +class SecretKey; +class PublicKey; +class Signature; +class Id; + 
+typedef std::vector<SecretKey> SecretKeyVec; +typedef std::vector<PublicKey> PublicKeyVec; +typedef std::vector<Signature> SignatureVec; +typedef std::vector<Id> IdVec; + +class Id { + blsId self_; + friend class PublicKey; + friend class SecretKey; + friend class Signature; +public: + Id(unsigned int id = 0) + { + blsIdSetInt(&self_, id); + } + bool operator==(const Id& rhs) const + { + return blsIdIsEqual(&self_, &rhs.self_) == 1; + } + bool operator!=(const Id& rhs) const { return !(*this == rhs); } + friend std::ostream& operator<<(std::ostream& os, const Id& id) + { + std::string str; + id.getStr(str, 16|IoPrefix); + return os << str; + } + friend std::istream& operator>>(std::istream& is, Id& id) + { + std::string str; + is >> str; + id.setStr(str, 16); + return is; + } + void getStr(std::string& str, int ioMode = 0) const + { + str.resize(1024); + size_t n = mclBnFr_getStr(&str[0], str.size(), &self_.v, ioMode); + if (n == 0) throw std::runtime_error("mclBnFr_getStr"); + str.resize(n); + } + void setStr(const std::string& str, int ioMode = 0) + { + int ret = mclBnFr_setStr(&self_.v, str.c_str(), str.size(), ioMode); + if (ret != 0) throw std::runtime_error("mclBnFr_setStr"); + } + bool isZero() const + { + return mclBnFr_isZero(&self_.v) == 1; + } + /* + set p[0, .., keySize) + @note the value must be less than r + */ + void set(const uint64_t *p) + { + setLittleEndian(p, local::keySize * sizeof(uint64_t)); + } + // bufSize is truncted/zero extended to keySize + void setLittleEndian(const void *buf, size_t bufSize) + { + mclBnFr_setLittleEndian(&self_.v, buf, bufSize); + } +}; + +/* + s ; secret key +*/ +class SecretKey { + blsSecretKey self_; +public: + bool operator==(const SecretKey& rhs) const + { + return blsSecretKeyIsEqual(&self_, &rhs.self_) == 1; + } + bool operator!=(const SecretKey& rhs) const { return !(*this == rhs); } + friend std::ostream& operator<<(std::ostream& os, const SecretKey& sec) + { + std::string str; + sec.getStr(str, 16|IoPrefix); + return os << str; + } + friend std::istream& operator>>(std::istream& is, SecretKey& sec) + { + std::string str; + is >> str; + sec.setStr(str); + return is; + } + void getStr(std::string& str, int ioMode = 0) const + { + str.resize(1024); + size_t n = mclBnFr_getStr(&str[0], str.size(), &self_.v, ioMode); + if (n == 0) throw std::runtime_error("mclBnFr_getStr"); + str.resize(n); + } + void setStr(const std::string& str, int ioMode = 0) + { + int ret = mclBnFr_setStr(&self_.v, str.c_str(), str.size(), ioMode); + if (ret != 0) throw std::runtime_error("mclBnFr_setStr"); + } + /* + initialize secretKey with random number + */ + void init() + { + int ret = blsSecretKeySetByCSPRNG(&self_); + if (ret != 0) throw std::runtime_error("blsSecretKeySetByCSPRNG"); + } + /* + set secretKey with p[0, .., keySize) and set id = 0 + @note the value must be less than r + */ + void set(const uint64_t *p) + { + setLittleEndian(p, local::keySize * sizeof(uint64_t)); + } + // bufSize is truncted/zero extended to keySize + void setLittleEndian(const void *buf, size_t bufSize) + { + mclBnFr_setLittleEndian(&self_.v, buf, bufSize); + } + // set hash of buf + void setHashOf(const void *buf, size_t bufSize) + { + int ret = mclBnFr_setHashOf(&self_.v, buf, bufSize); + if (ret != 0) throw std::runtime_error("mclBnFr_setHashOf"); + } + void getPublicKey(PublicKey& pub) const; + // constant time sign + // sign hash(m) + void sign(Signature& sig, const void *m, size_t size) const; + void sign(Signature& sig, const std::string& m) const + { + sign(sig, 
m.c_str(), m.size()); + } + // sign hashed value + void signHash(Signature& sig, const void *h, size_t size) const; + void signHash(Signature& sig, const std::string& h) const + { + signHash(sig, h.c_str(), h.size()); + } + /* + make Pop(Proof of Possesion) + pop = prv.sign(pub) + */ + void getPop(Signature& pop) const; + /* + make [s_0, ..., s_{k-1}] to prepare k-out-of-n secret sharing + */ + void getMasterSecretKey(SecretKeyVec& msk, size_t k) const + { + if (k <= 1) throw std::invalid_argument("getMasterSecretKey"); + msk.resize(k); + msk[0] = *this; + for (size_t i = 1; i < k; i++) { + msk[i].init(); + } + } + /* + set a secret key for id > 0 from msk + */ + void set(const SecretKeyVec& msk, const Id& id) + { + set(msk.data(), msk.size(), id); + } + /* + recover secretKey from k secVec + */ + void recover(const SecretKeyVec& secVec, const IdVec& idVec) + { + if (secVec.size() != idVec.size()) throw std::invalid_argument("SecretKey::recover"); + recover(secVec.data(), idVec.data(), idVec.size()); + } + /* + add secret key + */ + void add(const SecretKey& rhs); + + // the following methods are for C api + /* + the size of msk must be k + */ + void set(const SecretKey *msk, size_t k, const Id& id) + { + int ret = blsSecretKeyShare(&self_, &msk->self_, k, &id.self_); + if (ret != 0) throw std::runtime_error("blsSecretKeyShare"); + } + void recover(const SecretKey *secVec, const Id *idVec, size_t n) + { + int ret = blsSecretKeyRecover(&self_, &secVec->self_, &idVec->self_, n); + if (ret != 0) throw std::runtime_error("blsSecretKeyRecover:same id"); + } +}; + +/* + sQ ; public key +*/ +class PublicKey { + blsPublicKey self_; + friend class SecretKey; + friend class Signature; +public: + bool operator==(const PublicKey& rhs) const + { + return blsPublicKeyIsEqual(&self_, &rhs.self_) == 1; + } + bool operator!=(const PublicKey& rhs) const { return !(*this == rhs); } + friend std::ostream& operator<<(std::ostream& os, const PublicKey& pub) + { + std::string str; + pub.getStr(str, 16|IoPrefix); + return os << str; + } + friend std::istream& operator>>(std::istream& is, PublicKey& pub) + { + std::string str; + is >> str; + if (str != "0") { + // 1 <x.a> <x.b> <y.a> <y.b> + std::string t; + for (int i = 0; i < 4; i++) { + is >> t; + str += ' '; + str += t; + } + } + pub.setStr(str, 16); + return is; + } + void getStr(std::string& str, int ioMode = 0) const + { + str.resize(1024); + size_t n = mclBnG2_getStr(&str[0], str.size(), &self_.v, ioMode); + if (n == 0) throw std::runtime_error("mclBnG2_getStr"); + str.resize(n); + } + void setStr(const std::string& str, int ioMode = 0) + { + int ret = mclBnG2_setStr(&self_.v, str.c_str(), str.size(), ioMode); + if (ret != 0) throw std::runtime_error("mclBnG2_setStr"); + } + /* + set public for id from mpk + */ + void set(const PublicKeyVec& mpk, const Id& id) + { + set(mpk.data(), mpk.size(), id); + } + /* + recover publicKey from k pubVec + */ + void recover(const PublicKeyVec& pubVec, const IdVec& idVec) + { + if (pubVec.size() != idVec.size()) throw std::invalid_argument("PublicKey::recover"); + recover(pubVec.data(), idVec.data(), idVec.size()); + } + /* + add public key + */ + void add(const PublicKey& rhs) + { + blsPublicKeyAdd(&self_, &rhs.self_); + } + + // the following methods are for C api + void set(const PublicKey *mpk, size_t k, const Id& id) + { + int ret = blsPublicKeyShare(&self_, &mpk->self_, k, &id.self_); + if (ret != 0) throw std::runtime_error("blsPublicKeyShare"); + } + void recover(const PublicKey *pubVec, const Id *idVec, size_t 
n) + { + int ret = blsPublicKeyRecover(&self_, &pubVec->self_, &idVec->self_, n); + if (ret != 0) throw std::runtime_error("blsPublicKeyRecover"); + } +}; + +/* + s H(m) ; signature +*/ +class Signature { + blsSignature self_; + friend class SecretKey; +public: + bool operator==(const Signature& rhs) const + { + return blsSignatureIsEqual(&self_, &rhs.self_) == 1; + } + bool operator!=(const Signature& rhs) const { return !(*this == rhs); } + friend std::ostream& operator<<(std::ostream& os, const Signature& sig) + { + std::string str; + sig.getStr(str, 16|IoPrefix); + return os << str; + } + friend std::istream& operator>>(std::istream& is, Signature& sig) + { + std::string str; + is >> str; + if (str != "0") { + // 1 <x> <y> + std::string t; + for (int i = 0; i < 2; i++) { + is >> t; + str += ' '; + str += t; + } + } + sig.setStr(str, 16); + return is; + } + void getStr(std::string& str, int ioMode = 0) const + { + str.resize(1024); + size_t n = mclBnG1_getStr(&str[0], str.size(), &self_.v, ioMode); + if (n == 0) throw std::runtime_error("mclBnG1_getStr"); + str.resize(n); + } + void setStr(const std::string& str, int ioMode = 0) + { + int ret = mclBnG1_setStr(&self_.v, str.c_str(), str.size(), ioMode); + if (ret != 0) throw std::runtime_error("mclBnG1_setStr"); + } + bool verify(const PublicKey& pub, const void *m, size_t size) const + { + return blsVerify(&self_, &pub.self_, m, size) == 1; + } + bool verify(const PublicKey& pub, const std::string& m) const + { + return verify(pub, m.c_str(), m.size()); + } + bool verifyHash(const PublicKey& pub, const void *h, size_t size) const + { + return blsVerifyHash(&self_, &pub.self_, h, size) == 1; + } + bool verifyHash(const PublicKey& pub, const std::string& h) const + { + return verifyHash(pub, h.c_str(), h.size()); + } + bool verifyAggregatedHashes(const PublicKey *pubVec, const void *hVec, size_t sizeofHash, size_t n) const + { + return blsVerifyAggregatedHashes(&self_, &pubVec[0].self_, hVec, sizeofHash, n) == 1; + } + /* + verify self(pop) with pub + */ + bool verify(const PublicKey& pub) const + { + std::string str; + pub.getStr(str); + return verify(pub, str); + } + /* + recover sig from k sigVec + */ + void recover(const SignatureVec& sigVec, const IdVec& idVec) + { + if (sigVec.size() != idVec.size()) throw std::invalid_argument("Signature::recover"); + recover(sigVec.data(), idVec.data(), idVec.size()); + } + /* + add signature + */ + void add(const Signature& rhs) + { + blsSignatureAdd(&self_, &rhs.self_); + } + + // the following methods are for C api + void recover(const Signature* sigVec, const Id *idVec, size_t n) + { + int ret = blsSignatureRecover(&self_, &sigVec->self_, &idVec->self_, n); + if (ret != 0) throw std::runtime_error("blsSignatureRecover:same id"); + } +}; + +/* + make master public key [s_0 Q, ..., s_{k-1} Q] from msk +*/ +inline void getMasterPublicKey(PublicKeyVec& mpk, const SecretKeyVec& msk) +{ + const size_t n = msk.size(); + mpk.resize(n); + for (size_t i = 0; i < n; i++) { + msk[i].getPublicKey(mpk[i]); + } +} + +inline void SecretKey::getPublicKey(PublicKey& pub) const +{ + blsGetPublicKey(&pub.self_, &self_); +} +inline void SecretKey::sign(Signature& sig, const void *m, size_t size) const +{ + blsSign(&sig.self_, &self_, m, size); +} +inline void SecretKey::signHash(Signature& sig, const void *h, size_t size) const +{ + if (blsSignHash(&sig.self_, &self_, h, size) != 0) throw std::runtime_error("bad h"); +} +inline void SecretKey::getPop(Signature& pop) const +{ + PublicKey pub; + getPublicKey(pub); + 
std::string m; + pub.getStr(m); + sign(pop, m); +} + +/* + make pop from msk and mpk +*/ +inline void getPopVec(SignatureVec& popVec, const SecretKeyVec& msk) +{ + const size_t n = msk.size(); + popVec.resize(n); + for (size_t i = 0; i < n; i++) { + msk[i].getPop(popVec[i]); + } +} + +inline Signature operator+(const Signature& a, const Signature& b) { Signature r(a); r.add(b); return r; } +inline PublicKey operator+(const PublicKey& a, const PublicKey& b) { PublicKey r(a); r.add(b); return r; } +inline SecretKey operator+(const SecretKey& a, const SecretKey& b) { SecretKey r(a); r.add(b); return r; } + +} //bls diff --git a/vendor/github.com/dexon-foundation/bls/lib/.emptydir b/vendor/github.com/dexon-foundation/bls/lib/.emptydir new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/lib/.emptydir diff --git a/vendor/github.com/dexon-foundation/bls/mk.bat b/vendor/github.com/dexon-foundation/bls/mk.bat new file mode 100644 index 000000000..c5dfac91a --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/mk.bat @@ -0,0 +1,19 @@ +@echo off +if "%1"=="-s" ( + echo use static lib + set CFLAGS=%CFLAGS% /DMCLBN_NO_AUTOLINK /DBLS_DONT_EXPORT +) else if "%1"=="-d" ( + echo use dynamic lib +) else ( + echo "mk (-s|-d) <source file>" + goto exit +) +set SRC=%2 +set EXE=%SRC:.cpp=.exe% +set EXE=%EXE:.c=.exe% +set EXE=%EXE:test\=bin\% +set EXE=%EXE:sample\=bin\% +echo cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS% +cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS% + +:exit diff --git a/vendor/github.com/dexon-foundation/bls/mkdll.bat b/vendor/github.com/dexon-foundation/bls/mkdll.bat new file mode 100755 index 000000000..17e934f92 --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/mkdll.bat @@ -0,0 +1,8 @@ +rem @echo off + +call setvar.bat dll +echo make bls384.dll +cl /c %CFLAGS% /DBLS_NO_AUTOLINK /Foobj/bls_c.obj src/bls_c.cpp +cl /c %CFLAGS% /DBLS_NO_AUTOLINK /Foobj/fp.obj ../mcl/src/fp.cpp +lib /OUT:lib/bls384.lib /nodefaultlib obj/bls_c.obj obj/fp.obj %LDFLAGS% +cl /LD /MT obj/bls_c.obj obj/fp.obj %CFLAGS% /link /out:bin/bls384.dll %LDFLAGS% diff --git a/vendor/github.com/dexon-foundation/bls/mklib.bat b/vendor/github.com/dexon-foundation/bls/mklib.bat new file mode 100644 index 000000000..fca9333fc --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/mklib.bat @@ -0,0 +1,22 @@ +@echo off +call ..\mcl\setvar.bat +if "%1"=="dll" ( + echo make dynamic library DLL +) else ( + echo make static library LIB +) +call setvar.bat + +if "%1"=="dll" ( + cl /c %CFLAGS% /Foobj/bls_c256.obj src/bls_c256.cpp /DBLS_NO_AUTOLINK + cl /c %CFLAGS% /Foobj/bls_c384.obj src/bls_c384.cpp /DBLS_NO_AUTOLINK + cl /c %CFLAGS% /Foobj/fp.obj ../mcl/src/fp.cpp + link /nologo /DLL /OUT:bin\bls256.dll obj\bls_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\bls256.lib + link /nologo /DLL /OUT:bin\bls384.dll obj\bls_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\bls384.lib +) else ( + cl /c %CFLAGS% /Foobj/bls_c256.obj src/bls_c256.cpp + cl /c %CFLAGS% /Foobj/bls_c384.obj src/bls_c384.cpp + cl /c %CFLAGS% /Foobj/fp.obj ../mcl/src/fp.cpp /DMCLBN_DONT_EXPORT + lib /OUT:lib/bls256.lib /nodefaultlib obj/bls_c256.obj obj/fp.obj %LDFLAGS% + lib /OUT:lib/bls384.lib /nodefaultlib obj/bls_c384.obj obj/fp.obj %LDFLAGS% +) diff --git a/vendor/github.com/dexon-foundation/bls/obj/.emptydir b/vendor/github.com/dexon-foundation/bls/obj/.emptydir new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/obj/.emptydir diff --git 
a/vendor/github.com/dexon-foundation/bls/readme.md b/vendor/github.com/dexon-foundation/bls/readme.md
new file mode 100644
index 000000000..544fee3ca
--- /dev/null
+++ b/vendor/github.com/dexon-foundation/bls/readme.md
@@ -0,0 +1,170 @@
+[![Build Status](https://travis-ci.com/Spiderpowa/bls.png?branch=dev)](https://travis-ci.com/Spiderpowa/bls)
+
+# BLS threshold signature
+
+An implementation of the BLS threshold signature scheme
+
+# Installation Requirements
+
+Create a working directory (e.g., work) and clone the following repositories.
+```
+mkdir work
+cd work
+git clone git://github.com/herumi/mcl.git
+git clone git://github.com/herumi/bls.git
+git clone git://github.com/herumi/cybozulib_ext ; for Windows only
+```
+
+# **REMARK** libbls.a for the C++ interface (bls/bls.hpp) is removed
+Link `lib/libbls256.a` or `lib/libbls384.a` to use `bls/bls.hpp`, according to MCLBN_FP_UNIT_SIZE = 4 or 6.
+
+# Build and test for Linux
+To build and test, run
+```
+cd bls
+make test
+```
+To build the sample programs, run
+```
+make sample_test
+```
+
+# Build and test for Windows
+1) build the static library and use it
+```
+mklib
+mk -s test\bls_c384_test.cpp
+bin\bls_c384_test.exe
+```
+
+2) build the dynamic library and use it
+```
+mklib dll
+mk -d test\bls_c384_test.cpp
+bin\bls_c384_test.exe
+```
+
+# Library
+* libbls256.a/libbls256_dy.so ; for BN254, compiled with MCLBN_FP_UNIT_SIZE=4
+* libbls384.a/libbls384_dy.so ; for BN254/BN381_1/BLS12_381, compiled with MCLBN_FP_UNIT_SIZE=6
+
+See `mcl/include/curve_type.h` for the curve parameters.
+
+# API
+
+## Basic API
+
+BLS signature
+```
+e : G2 x G1 -> Fp12 ; optimal ate pairing over BN curve
+Q in G2 ; fixed global parameter
+H : {str} -> G1
+s in Fr: secret key
+sQ in G2; public key
+s H(m) in G1; signature of m
+verify ; e(sQ, H(m)) = e(Q, s H(m))
+```
+
+```
+void bls::init();
+```
+
+Initialize this library. Call this once before using the other APIs.
+
+```
+void SecretKey::init();
+```
+
+Initialize the instance of SecretKey; `s` is set to a random number.
+
+```
+void SecretKey::getPublicKey(PublicKey& pub) const;
+```
+
+Get the public key `sQ` for the secret key `s`.
+
+```
+void SecretKey::sign(Sign& sign, const std::string& m) const;
+```
+
+Make the signature `s H(m)` of message m.
+
+```
+bool Sign::verify(const PublicKey& pub, const std::string& m) const;
+```
+
+Verify the signature with pub and m and return true if it is valid, i.e., if
+
+```
+e(sQ, H(m)) == e(Q, s H(m))
+```
+
+### Secret Sharing API
+
+```
+void SecretKey::getMasterSecretKey(SecretKeyVec& msk, size_t k) const;
+```
+
+Prepare k-out-of-n secret sharing for the secret key.
+`msk[0]` is the original secret key `s` and `msk[i]` for i > 0 are random secret keys.
+
+```
+void SecretKey::set(const SecretKeyVec& msk, const Id& id);
+```
+
+Make the secret key f(id) from msk and id, where f(x) = msk[0] + msk[1] x + ... + msk[k-1] x^{k-1}.
+
+You can make a public key `f(id)Q` from each secret key f(id) for id != 0 and sign a message.
+
+```
+void Sign::recover(const SignVec& signVec, const IdVec& idVec);
+```
+
+Collect k pairs of signature `f(id) H(m)` and `id` for a message m and recover the original signature `s H(m)` of the secret key `s`.
+
+### PoP (Proof of Possession)
+
+```
+void SecretKey::getPop(Sign& pop) const;
+```
+
+Sign pub and make a pop `s H(sQ)`.
+
+```
+bool Sign::verify(const PublicKey& pub) const;
+```
+
+Verify a public key by its pop.
+
+# Check the order of a point
+
+The deserializer functions check whether a point has the correct order;
+the check is costly, especially for G2.
+If you do not want to check it, then call
+```
+blsSignatureVerifyOrder(false);
+blsPublicKeyVerifyOrder(false);
+```
+
+cf. subgroup attack
+
+# Go
+```
+make test_go
+```
+
+# WASM (WebAssembly)
+```
+mkdir ../bls-wasm
+make bls-wasm
+```
+see [BLS signature demo on browser](https://herumi.github.io/bls-wasm/bls-demo.html)
+
+# License
+
+modified new BSD License
+http://opensource.org/licenses/BSD-3-Clause
+
+# Author
+
+MITSUNARI Shigeo (herumi@nifty.com)
diff --git a/vendor/github.com/dexon-foundation/bls/release.props b/vendor/github.com/dexon-foundation/bls/release.props
new file mode 100644
index 000000000..886ce6890
--- /dev/null
+++ b/vendor/github.com/dexon-foundation/bls/release.props
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ImportGroup Label="PropertySheets" />
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup />
+  <ItemDefinitionGroup>
+    <ClCompile>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+    </ClCompile>
+  </ItemDefinitionGroup>
+  <ItemGroup />
+</Project>
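The readme above documents only the C++ API, while this diff vendors the package for a Go project and extends the Go binding (ffi/go/bls) with SerializeToHexStr/DeserializeHexStr. Below is a minimal Go sketch of the same plain and k-out-of-n flows; anything not visible in this diff (CurveFp254BNb, SetByCSPRNG, GetMasterSecretKey, Set, SetDecString, Recover, the ID type) is assumed from the upstream herumi/bls binding and may differ in this vendored copy.

```go
package main

import (
	"fmt"

	"github.com/dexon-foundation/bls/ffi/go/bls"
)

func main() {
	// Init passes MCLBN_COMPILED_TIME_VAR internally (see the bls.go hunk above).
	if err := bls.Init(bls.CurveFp254BNb); err != nil { // assumed curve constant
		panic(err)
	}

	// Plain signature: secret key s, public key sQ, signature s*H(m).
	var sec bls.SecretKey
	sec.SetByCSPRNG() // assumed wrapper around blsSecretKeySetByCSPRNG
	pub := sec.GetPublicKey()
	msg := "hello bls threshold signature"
	sig := sec.Sign(msg)
	fmt.Println("verify:", sig.Verify(pub, msg))

	// Hex round trip via the methods added in this diff.
	var copied bls.Sign
	if err := copied.DeserializeHexStr(sig.SerializeToHexStr()); err != nil {
		panic(err)
	}
	fmt.Println("round-trip verify:", copied.Verify(pub, msg))

	// k-out-of-n sharing: msk defines f(x); each participant holds f(id).
	const k = 3
	msk := sec.GetMasterSecretKey(k)
	idStrs := []string{"1", "5", "3"}
	sigVec := make([]bls.Sign, 0, k)
	idVec := make([]bls.ID, 0, k)
	for _, s := range idStrs {
		var id bls.ID
		if err := id.SetDecString(s); err != nil { // assumed ID setter
			panic(err)
		}
		var share bls.SecretKey
		if err := share.Set(msk, &id); err != nil { // share = f(id)
			panic(err)
		}
		sigVec = append(sigVec, *share.Sign(msg))
		idVec = append(idVec, id)
	}

	// Lagrange interpolation recovers s*H(m) from any k shares.
	var recovered bls.Sign
	if err := recovered.Recover(sigVec, idVec); err != nil {
		panic(err)
	}
	fmt.Println("recovered verify:", recovered.Verify(pub, msg))
}
```

The sketch only runs once the static library has been built (`make` in mcl and then in bls, as in the readme), since the binding links it through cgo.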
\ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/bls/setvar.bat b/vendor/github.com/dexon-foundation/bls/setvar.bat new file mode 100755 index 000000000..b45af24b9 --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/setvar.bat @@ -0,0 +1,6 @@ +@echo off +call ..\mcl\setvar.bat +set CFLAGS=%CFLAGS% /I ..\mcl\include /I .\ +set LDFLAGS=%LDFLAGS% /LIBPATH:..\mcl\lib +echo CFLAGS=%CFLAGS% +echo LDFLAGS=%LDFLAGS%
\ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/bls/src/bls_c256.cpp b/vendor/github.com/dexon-foundation/bls/src/bls_c256.cpp new file mode 100644 index 000000000..a9f3412ea --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/src/bls_c256.cpp @@ -0,0 +1,3 @@ +#define MCLBN_FP_UNIT_SIZE 4 +#include "bls_c_impl.hpp" + diff --git a/vendor/github.com/dexon-foundation/bls/src/bls_c384.cpp b/vendor/github.com/dexon-foundation/bls/src/bls_c384.cpp new file mode 100644 index 000000000..d28f8547b --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/src/bls_c384.cpp @@ -0,0 +1,3 @@ +#define MCLBN_FP_UNIT_SIZE 6 +#include "bls_c_impl.hpp" + diff --git a/vendor/github.com/dexon-foundation/bls/src/bls_c_impl.hpp b/vendor/github.com/dexon-foundation/bls/src/bls_c_impl.hpp new file mode 100644 index 000000000..16b79f913 --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/src/bls_c_impl.hpp @@ -0,0 +1,461 @@ +#define MCLBN_DONT_EXPORT +#define BLS_DLL_EXPORT + +#include <bls/bls.h> + +#include "../mcl/src/bn_c_impl.hpp" + +/* + BLS signature + e : G1 x G2 -> Fp12 + Q in G2 ; fixed global parameter + H : {str} -> G1 + s : secret key + sQ ; public key + s H(m) ; signature of m + verify ; e(sQ, H(m)) = e(Q, s H(m)) +*/ + +static G2 g_Q; +const size_t maxQcoeffN = 128; +static mcl::FixedArray<Fp6, maxQcoeffN> g_Qcoeff; // precomputed Q +inline const G2& getQ() { return g_Q; } +inline const mcl::FixedArray<Fp6, maxQcoeffN>& getQcoeff() { return g_Qcoeff; } + +int blsInitNotThreadSafe(int curve, int compiledTimeVar) +{ + int ret = mclBn_init(curve, compiledTimeVar); + if (ret < 0) return ret; + bool b; + + if (curve == MCL_BN254) { + const char *Qx_BN254 = "11ccb44e77ac2c5dc32a6009594dbe331ec85a61290d6bbac8cc7ebb2dceb128 f204a14bbdac4a05be9a25176de827f2e60085668becdd4fc5fa914c9ee0d9a"; + const char *Qy_BN254 = "7c13d8487903ee3c1c5ea327a3a52b6cc74796b1760d5ba20ed802624ed19c8 8f9642bbaacb73d8c89492528f58932f2de9ac3e80c7b0e41f1a84f1c40182"; + g_Q.x.setStr(&b, Qx_BN254, 16); + g_Q.y.setStr(&b, Qy_BN254, 16); + g_Q.z = 1; + } else { + mapToG2(&b, g_Q, 1); + } + if (!b) return -100; + if (curve == MCL_BN254) { + #include "./qcoeff-bn254.hpp" + g_Qcoeff.resize(BN::param.precomputedQcoeffSize); + assert(g_Qcoeff.size() == CYBOZU_NUM_OF_ARRAY(tbl)); + for (size_t i = 0; i < g_Qcoeff.size(); i++) { + Fp6& x6 = g_Qcoeff[i]; + for (size_t j = 0; j < 6; j++) { + Fp& x = x6.getFp0()[j]; + mcl::fp::Unit *p = const_cast<mcl::fp::Unit*>(x.getUnit()); + for (size_t k = 0; k < 4; k++) { + p[k] = QcoeffTblBN254[i][j][k]; + } + } + } + } else { + precomputeG2(&b, g_Qcoeff, getQ()); + } + if (!b) return -101; + return 0; +} + +#ifdef __EMSCRIPTEN__ +extern "C" BLS_DLL_API void *blsMalloc(size_t n) +{ + return malloc(n); +} +extern "C" BLS_DLL_API void blsFree(void *p) +{ + free(p); +} +#endif + +#if !defined(__EMSCRIPTEN__) && !defined(__wasm__) + #if defined(CYBOZU_CPP_VERSION) && CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 + #include <mutex> + #define USE_STD_MUTEX + #else + #include <cybozu/mutex.hpp> + #define USE_CYBOZU_MUTEX + #endif +#endif + +int blsInit(int curve, int compiledTimeVar) +{ + int ret = 0; +#ifdef USE_STD_MUTEX + static std::mutex m; + std::lock_guard<std::mutex> lock(m); +#elif defined(USE_CYBOZU_MUTEX) + static cybozu::Mutex m; + cybozu::AutoLock lock(m); +#endif + static int g_curve = -1; + if (g_curve != curve) { + ret = blsInitNotThreadSafe(curve, compiledTimeVar); + g_curve = curve; + } + return ret; +} + +static inline const mclBnG1 *cast(const G1* x) { 
return (const mclBnG1*)x; } +static inline const mclBnG2 *cast(const G2* x) { return (const mclBnG2*)x; } + +void blsIdSetInt(blsId *id, int x) +{ + mclBnFr_setInt(&id->v, x); +} + +int blsSecretKeySetLittleEndian(blsSecretKey *sec, const void *buf, mclSize bufSize) +{ + return mclBnFr_setLittleEndian(&sec->v, buf, bufSize); +} + +void blsGetPublicKey(blsPublicKey *pub, const blsSecretKey *sec) +{ + mclBnG2_mul(&pub->v, cast(&getQ()), &sec->v); +} + +void blsSign(blsSignature *sig, const blsSecretKey *sec, const void *m, mclSize size) +{ + G1 Hm; + hashAndMapToG1(Hm, m, size); + mclBnG1_mulCT(&sig->v, cast(&Hm), &sec->v); +} + +/* + e(P1, Q1) == e(P2, Q2) + <=> finalExp(ML(P1, Q1)) == finalExp(ML(P2, Q2)) + <=> finalExp(ML(P1, Q1) / ML(P2, Q2)) == 1 + <=> finalExp(ML(P1, Q1) * ML(-P2, Q2)) == 1 + Q1 is precomputed +*/ +bool isEqualTwoPairings(const G1& P1, const Fp6* Q1coeff, const G1& P2, const G2& Q2) +{ + Fp12 e; + precomputedMillerLoop2mixed(e, P2, Q2, -P1, Q1coeff); + finalExp(e, e); + return e.isOne(); +} + +int blsVerify(const blsSignature *sig, const blsPublicKey *pub, const void *m, mclSize size) +{ + G1 Hm; + hashAndMapToG1(Hm, m, size); + /* + e(sHm, Q) = e(Hm, sQ) + e(sig, Q) = e(Hm, pub) + */ + return isEqualTwoPairings(*cast(&sig->v), getQcoeff().data(), Hm, *cast(&pub->v)); +} + +mclSize blsIdSerialize(void *buf, mclSize maxBufSize, const blsId *id) +{ + return mclBnFr_serialize(buf, maxBufSize, &id->v); +} + +mclSize blsSecretKeySerialize(void *buf, mclSize maxBufSize, const blsSecretKey *sec) +{ + return mclBnFr_serialize(buf, maxBufSize, &sec->v); +} + +mclSize blsPublicKeySerialize(void *buf, mclSize maxBufSize, const blsPublicKey *pub) +{ + return mclBnG2_serialize(buf, maxBufSize, &pub->v); +} + +mclSize blsSignatureSerialize(void *buf, mclSize maxBufSize, const blsSignature *sig) +{ + return mclBnG1_serialize(buf, maxBufSize, &sig->v); +} + +mclSize blsIdDeserialize(blsId *id, const void *buf, mclSize bufSize) +{ + return mclBnFr_deserialize(&id->v, buf, bufSize); +} + +mclSize blsSecretKeyDeserialize(blsSecretKey *sig, const void *buf, mclSize bufSize) +{ + return mclBnFr_deserialize(&sig->v, buf, bufSize); +} + +mclSize blsPublicKeyDeserialize(blsPublicKey *pub, const void *buf, mclSize bufSize) +{ + return mclBnG2_deserialize(&pub->v, buf, bufSize); +} + +mclSize blsSignatureDeserialize(blsSignature *sig, const void *buf, mclSize bufSize) +{ + return mclBnG1_deserialize(&sig->v, buf, bufSize); +} + +int blsIdIsEqual(const blsId *lhs, const blsId *rhs) +{ + return mclBnFr_isEqual(&lhs->v, &rhs->v); +} + +int blsSecretKeyIsEqual(const blsSecretKey *lhs, const blsSecretKey *rhs) +{ + return mclBnFr_isEqual(&lhs->v, &rhs->v); +} + +int blsPublicKeyIsEqual(const blsPublicKey *lhs, const blsPublicKey *rhs) +{ + return mclBnG2_isEqual(&lhs->v, &rhs->v); +} + +int blsSignatureIsEqual(const blsSignature *lhs, const blsSignature *rhs) +{ + return mclBnG1_isEqual(&lhs->v, &rhs->v); +} + +int blsSecretKeyShare(blsSecretKey *sec, const blsSecretKey* msk, mclSize k, const blsId *id) +{ + return mclBn_FrEvaluatePolynomial(&sec->v, &msk->v, k, &id->v); +} + +int blsPublicKeyShare(blsPublicKey *pub, const blsPublicKey *mpk, mclSize k, const blsId *id) +{ + return mclBn_G2EvaluatePolynomial(&pub->v, &mpk->v, k, &id->v); +} + +int blsSecretKeyRecover(blsSecretKey *sec, const blsSecretKey *secVec, const blsId *idVec, mclSize n) +{ + return mclBn_FrLagrangeInterpolation(&sec->v, &idVec->v, &secVec->v, n); +} + +int blsPublicKeyRecover(blsPublicKey *pub, const blsPublicKey *pubVec, 
const blsId *idVec, mclSize n) +{ + return mclBn_G2LagrangeInterpolation(&pub->v, &idVec->v, &pubVec->v, n); +} + +int blsSignatureRecover(blsSignature *sig, const blsSignature *sigVec, const blsId *idVec, mclSize n) +{ + return mclBn_G1LagrangeInterpolation(&sig->v, &idVec->v, &sigVec->v, n); +} + +void blsSecretKeyAdd(blsSecretKey *sec, const blsSecretKey *rhs) +{ + mclBnFr_add(&sec->v, &sec->v, &rhs->v); +} + +void blsPublicKeyAdd(blsPublicKey *pub, const blsPublicKey *rhs) +{ + mclBnG2_add(&pub->v, &pub->v, &rhs->v); +} + +void blsSignatureAdd(blsSignature *sig, const blsSignature *rhs) +{ + mclBnG1_add(&sig->v, &sig->v, &rhs->v); +} + +void blsSignatureVerifyOrder(int doVerify) +{ + mclBn_verifyOrderG1(doVerify); +} +void blsPublicKeyVerifyOrder(int doVerify) +{ + mclBn_verifyOrderG2(doVerify); +} +int blsSignatureIsValidOrder(const blsSignature *sig) +{ + return mclBnG1_isValidOrder(&sig->v); +} +int blsPublicKeyIsValidOrder(const blsPublicKey *pub) +{ + return mclBnG2_isValidOrder(&pub->v); +} + +#ifndef BLS_MINIMUM_API +inline bool toG1(G1& Hm, const void *h, mclSize size) +{ + Fp t; + t.setArrayMask((const char *)h, size); + bool b; + BN::mapToG1(&b, Hm, t); + return b; +} + +int blsVerifyAggregatedHashes(const blsSignature *aggSig, const blsPublicKey *pubVec, const void *hVec, size_t sizeofHash, mclSize n) +{ + if (n == 0) return 0; + /* + e(aggSig, Q) = prod_i e(hVec[i], pubVec[i]) + <=> finalExp(ML(-aggSig, Q) * prod_i ML(hVec[i], pubVec[i])) == 1 + */ + GT e1, e2; + BN::precomputedMillerLoop(e1, -*cast(&aggSig->v), g_Qcoeff.data()); + const char *ph = (const char*)hVec; + G1 h; + if (!toG1(h, &ph[0], sizeofHash)) return 0; + BN::millerLoop(e2, h, *cast(&pubVec[0].v)); + e1 *= e2; + for (size_t i = 1; i < n; i++) { + if (!toG1(h, &ph[i * sizeofHash], sizeofHash)) return 0; + BN::millerLoop(e2, h, *cast(&pubVec[i].v)); + e1 *= e2; + } + BN::finalExp(e1, e1); + return e1.isOne(); +} + +int blsSignHash(blsSignature *sig, const blsSecretKey *sec, const void *h, mclSize size) +{ + G1 Hm; + if (!toG1(Hm, h, size)) return -1; + mclBnG1_mulCT(&sig->v, cast(&Hm), &sec->v); + return 0; +} + +int blsVerifyHash(const blsSignature *sig, const blsPublicKey *pub, const void *h, mclSize size) +{ + G1 Hm; + if (!toG1(Hm, h, size)) return 0; + return isEqualTwoPairings(*cast(&sig->v), getQcoeff().data(), Hm, *cast(&pub->v)); +} + +void blsSecretKeySub(blsSecretKey *sec, const blsSecretKey *rhs) +{ + mclBnFr_sub(&sec->v, &sec->v, &rhs->v); +} + +void blsPublicKeySub(blsPublicKey *pub, const blsPublicKey *rhs) +{ + mclBnG2_sub(&pub->v, &pub->v, &rhs->v); +} + +void blsSignatureSub(blsSignature *sig, const blsSignature *rhs) +{ + mclBnG1_sub(&sig->v, &sig->v, &rhs->v); +} +mclSize blsGetOpUnitSize() // FpUint64Size +{ + return Fp::getUnitSize() * sizeof(mcl::fp::Unit) / sizeof(uint64_t); +} + +int blsGetCurveOrder(char *buf, mclSize maxBufSize) +{ + return (int)Fr::getModulo(buf, maxBufSize); +} + +int blsGetFieldOrder(char *buf, mclSize maxBufSize) +{ + return (int)Fp::getModulo(buf, maxBufSize); +} + +int blsGetG1ByteSize() +{ + return mclBn_getG1ByteSize(); +} + +int blsGetFrByteSize() +{ + return mclBn_getFrByteSize(); +} + +void blsGetGeneratorOfG2(blsPublicKey *pub) +{ + *(G2*)pub = getQ(); +} + +int blsIdSetDecStr(blsId *id, const char *buf, mclSize bufSize) +{ + return mclBnFr_setStr(&id->v, buf, bufSize, 10); +} +int blsIdSetHexStr(blsId *id, const char *buf, mclSize bufSize) +{ + return mclBnFr_setStr(&id->v, buf, bufSize, 16); +} + +int blsIdSetLittleEndian(blsId *id, const void *buf, 
mclSize bufSize) +{ + return mclBnFr_setLittleEndian(&id->v, buf, bufSize); +} + +mclSize blsIdGetDecStr(char *buf, mclSize maxBufSize, const blsId *id) +{ + return mclBnFr_getStr(buf, maxBufSize, &id->v, 10); +} + +mclSize blsIdGetHexStr(char *buf, mclSize maxBufSize, const blsId *id) +{ + return mclBnFr_getStr(buf, maxBufSize, &id->v, 16); +} + +int blsHashToSecretKey(blsSecretKey *sec, const void *buf, mclSize bufSize) +{ + return mclBnFr_setHashOf(&sec->v, buf, bufSize); +} + +#ifndef MCL_DONT_USE_CSPRNG +int blsSecretKeySetByCSPRNG(blsSecretKey *sec) +{ + return mclBnFr_setByCSPRNG(&sec->v); +} +#endif + +void blsGetPop(blsSignature *sig, const blsSecretKey *sec) +{ + blsPublicKey pub; + blsGetPublicKey(&pub, sec); + char buf[1024]; + mclSize n = mclBnG2_serialize(buf, sizeof(buf), &pub.v); + assert(n); + blsSign(sig, sec, buf, n); +} + +int blsVerifyPop(const blsSignature *sig, const blsPublicKey *pub) +{ + char buf[1024]; + mclSize n = mclBnG2_serialize(buf, sizeof(buf), &pub->v); + if (n == 0) return 0; + return blsVerify(sig, pub, buf, n); +} + +mclSize blsIdGetLittleEndian(void *buf, mclSize maxBufSize, const blsId *id) +{ + return mclBnFr_serialize(buf, maxBufSize, &id->v); +} +int blsSecretKeySetDecStr(blsSecretKey *sec, const char *buf, mclSize bufSize) +{ + return mclBnFr_setStr(&sec->v, buf, bufSize, 10); +} +int blsSecretKeySetHexStr(blsSecretKey *sec, const char *buf, mclSize bufSize) +{ + return mclBnFr_setStr(&sec->v, buf, bufSize, 16); +} +mclSize blsSecretKeyGetLittleEndian(void *buf, mclSize maxBufSize, const blsSecretKey *sec) +{ + return mclBnFr_serialize(buf, maxBufSize, &sec->v); +} +mclSize blsSecretKeyGetDecStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec) +{ + return mclBnFr_getStr(buf, maxBufSize, &sec->v, 10); +} +mclSize blsSecretKeyGetHexStr(char *buf, mclSize maxBufSize, const blsSecretKey *sec) +{ + return mclBnFr_getStr(buf, maxBufSize, &sec->v, 16); +} +int blsPublicKeySetHexStr(blsPublicKey *pub, const char *buf, mclSize bufSize) +{ + return mclBnG2_setStr(&pub->v, buf, bufSize, 16); +} +mclSize blsPublicKeyGetHexStr(char *buf, mclSize maxBufSize, const blsPublicKey *pub) +{ + return mclBnG2_getStr(buf, maxBufSize, &pub->v, 16); +} +int blsSignatureSetHexStr(blsSignature *sig, const char *buf, mclSize bufSize) +{ + return mclBnG1_setStr(&sig->v, buf, bufSize, 16); +} +mclSize blsSignatureGetHexStr(char *buf, mclSize maxBufSize, const blsSignature *sig) +{ + return mclBnG1_getStr(buf, maxBufSize, &sig->v, 16); +} +void blsDHKeyExchange(blsPublicKey *out, const blsSecretKey *sec, const blsPublicKey *pub) +{ + mclBnG2_mulCT(&out->v, &pub->v, &sec->v); +} + +#endif + diff --git a/vendor/github.com/dexon-foundation/bls/src/proj/bls.vcxproj b/vendor/github.com/dexon-foundation/bls/src/proj/bls.vcxproj new file mode 100644 index 000000000..b78c97919 --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/src/proj/bls.vcxproj @@ -0,0 +1,92 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{1DBB979A-C212-45CD-9563-446A96F87F71}</ProjectGuid> + 
<Keyword>Win32Proj</Keyword> + <RootNamespace>ec_test</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>v140</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>v140</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + <Import Project="$(SolutionDir)common.props" /> + <Import Project="$(SolutionDir)debug.props" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + <Import Project="$(SolutionDir)common.props" /> + <Import Project="$(SolutionDir)release.props" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <TargetExt>.lib</TargetExt> + <OutDir>$(SolutionDir)lib\</OutDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <TargetExt>.lib</TargetExt> + <OutDir>$(SolutionDir)lib\</OutDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="$(SolutionDir)src\bls.cpp" /> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project>
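For reference, the identity that blsVerifyAggregatedHashes above checks can be spelled out as follows; this is only a restatement of the in-code comment, with H_i = toG1(hVec[i]) in G1, P_i = pubVec[i] in G2, sigma the aggregate signature in G1, and Q the fixed G2 point whose precomputed Miller-loop coefficients the code passes as g_Qcoeff (the QcoeffTblBN254 table in qcoeff-bn254.hpp below appears to be exactly such a precomputation for BN254):
\[
e(\sigma,\,Q) \;=\; \prod_{i=1}^{n} e(H_i,\,P_i)
\quad\Longleftrightarrow\quad
\mathrm{finalExp}\!\Big(\mathrm{ML}(-\sigma,\,Q)\cdot\prod_{i=1}^{n}\mathrm{ML}(H_i,\,P_i)\Big) \;=\; 1,
\]
since each pairing factors as e(P, Q) = finalExp(ML(P, Q)), e(-sigma, Q) is the inverse of e(sigma, Q) by bilinearity, and finalExp is a power map and therefore distributes over the product. That is why the code multiplies the Miller-loop values first and performs a single final exponentiation at the end instead of computing n + 1 full pairings.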
\ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/bls/src/qcoeff-bn254.hpp b/vendor/github.com/dexon-foundation/bls/src/qcoeff-bn254.hpp new file mode 100644 index 000000000..18d169568 --- /dev/null +++ b/vendor/github.com/dexon-foundation/bls/src/qcoeff-bn254.hpp @@ -0,0 +1,564 @@ +#if MCL_SIZEOF_UNIT == 8 +static const uint64_t QcoeffTblBN254[][6][4] = { + { + {0x8c5c1b842e501310ull,0x6a418cdaced77710ull,0xf5ad725dd0d9a5ffull,0x012d501f32362f48ull,}, + {0xb8a8a8c11e51dc62ull,0xeaeb87e0f25a8611ull,0x9ba8738e4483d511ull,0x0664a4e36d64379eull,}, + {0x4a5af38c0aa5930aull,0x189fef61a74c388dull,0x83cc3225c7748527ull,0x2107491582310dc6ull,}, + {0x43aef621120a524aull,0x359d06a56e339486ull,0xdf5ab35e2222d9b1ull,0x20968bac939743acull,}, + {0xe8e4c0bb65cd67b8ull,0x255a0859bc55ff2cull,0xf1c57d1da3c060c6ull,0x138d83468f42cc0eull,}, + {0xdf985e5f768c149cull,0xa059c65c4b5da3ffull,0xed3d38d9f77bb968ull,0x02281f01255a850cull,}, + }, + { + {0xe3f53d8cfb4866a0ull,0xa0f9a16a21c159aaull,0x647fc210c7edf3a9ull,0x0db92f588c73caf9ull,}, + {0x6e9349b777bc2cf1ull,0x4fd987eb22e2469cull,0x666644a8e61b0a0eull,0x02f5bf9aae96c0deull,}, + {0xd5fd6288342479ebull,0x74022b671c6c8d8eull,0xda32d1b497cac7b2ull,0x0abecf35a19b5c7eull,}, + {0x1500891565b5f9aaull,0x4b7ce141cd7f4361ull,0xadf3447c534846c1ull,0x078b36a30d45de5full,}, + {0x37f172cff76e4b77ull,0x696d093b3ee37e4aull,0x2193797b7da56c6eull,0x1f5fc9efcbbb93e7ull,}, + {0x4c7d799b765b8f44ull,0x7adfd285e906edd8ull,0x79d68eaaf88a0885ull,0x20707d672be892cbull,}, + }, + { + {0x84bbf3849c691e74ull,0xeeb90e1efc3e3436ull,0xd9d9bb6257bf19e4ull,0x1b37ef04ea7d6f85ull,}, + {0xa6bdbbe0895ba12aull,0x58cade2ad0f1aa84ull,0xe0bb325678a2c748ull,0x23d1992e977c788cull,}, + {0x44559f0b0f4bb2ccull,0xe61b479bc88980eeull,0x2a70aa9df3e28c92ull,0x18039bee97722b74ull,}, + {0x9e5667da3db8e9e6ull,0x826ba07eb28c31f8ull,0x3f8b4eeb463d6923ull,0x1af85c2b10d3a2f0ull,}, + {0x8783f372684ea930ull,0x1aa0d9e436f41ea7ull,0xc84a3fc56af9f624ull,0x0d02698756cd5a2cull,}, + {0xe47407ede7b7c2afull,0x7d665c59e37ee7a7ull,0x542b91f12e0fa2a7ull,0x2084e73dc21f415eull,}, + }, + { + {0x2aebe318f3d167c6ull,0x5a2b2364b3abc614ull,0x31b2cdfd847e0053ull,0x04f0f63eed2a2f8cull,}, + {0x0573d320ee14ecf4ull,0x4e0dc9d92e543ddeull,0x58a280570cac8d36ull,0x16226935e8e9f9bdull,}, + {0x2d51a89174717a26ull,0x7341be7f883d0806ull,0xc9b4ee66364066ceull,0x018c79b95f127b49ull,}, + {0xe5420d2f4210dbd7ull,0x179c22d607a5c801ull,0xe3aae016e739bcc8ull,0x20c554233ddd50caull,}, + {0x6c5c4b29c77bb152ull,0xc30df398c85f0f2cull,0x5d5096a07ed6a257ull,0x0790d485c22a3639ull,}, + {0x8aadc7bbf2cb348eull,0xc339d87c2118c2cfull,0x8f49e0eb46961ca9ull,0x24740f0ee2134c2cull,}, + }, + { + {0x3b80354a061dbf06ull,0x961e0dfd74b84147ull,0xeb4b27dbde455fc1ull,0x100da22e6baf58b5ull,}, + {0xb156ffc78a60a8acull,0xc873bf776b8daaeeull,0x5a702f5446bf83fdull,0x1fce59e50222949bull,}, + {0x32d7640c0f717812ull,0xc58d05abdc19ceedull,0x1e63c2a492849373ull,0x23443ce8fb2d6feaull,}, + {0x870f2d1a2e39f52eull,0x7aa53cb06541429aull,0xee7b80b7031f23beull,0x0a8a095b3fdf2cf6ull,}, + {0x4e489bd278487a58ull,0xa914d93e5ed31065ull,0x6720c32ae140db7aull,0x0c22020e6a97031full,}, + {0x7535115a15420cceull,0x2cd019bac6256080ull,0x8234c3b61757e461ull,0x24d65e78c88298b2ull,}, + }, + { + {0x1f0bdc2cae53aa21ull,0x263506a6526641afull,0xacd41097fab7f62full,0x0b2c92453d474a86ull,}, + {0x2d23a58a46d63e3aull,0xa65ff6f1f716fe37ull,0xb86dc831f970fb2dull,0x0bc3cf38a191e63aull,}, + {0xeb0ca4fdeba41bbaull,0x969cf610e1a3a009ull,0x93c5d1bad6c7240bull,0x20ad29c9a9f1d6d6ull,}, + 
{0x006a12a187464b7aull,0xe354d9be0ec65202ull,0x9dff5c227947f5b7ull,0x24e3dc2833ba4d2full,}, + {0x9350693ebfdfb4c6ull,0x07d8abf27abb8fc3ull,0x58f5ab0b518e5113ull,0x125f2d7d40ed8650ull,}, + {0xc9fd435af6e86f34ull,0x04dc07374f373455ull,0xd040d286d71db00dull,0x141a1253f3bc2a50ull,}, + }, + { + {0xbcfee5dad6ad33b7ull,0x8cd72df36c5b56daull,0xc2949399ad52da69ull,0x0f6ffe6d68a398d6ull,}, + {0x777dc689b038aaf4ull,0xf7a8f41c7c04e9f1ull,0xcdab24ebcea39892ull,0x0178d69b1b935d81ull,}, + {0x65a001a22be563c6ull,0xfc1b2634dc76eafeull,0xed4f6ea19949392full,0x0e4e9127957d60e7ull,}, + {0x919a1c91a123e003ull,0x23f8ec239ef8a15dull,0x0470cb40e520d6f5ull,0x0be9b58098cd0f01ull,}, + {0x735e236220cf1152ull,0x82e68710903f65b1ull,0x6c932338d29169ccull,0x0e204d6a8c7d5034ull,}, + {0xac47692ec8245f1full,0x125db7c68d7e7a9bull,0x6ead9899d3150beaull,0x1957068d4a3da4b8ull,}, + }, + { + {0x45c168b2bce7b4daull,0x63afa3b276f9f910ull,0x60af02b6be9889a6ull,0x1adad7fa35385ae7ull,}, + {0x8d35cd7e7df59aa6ull,0x13cf29589f4b84b1ull,0xec6ecff2e1540013ull,0x0ecbf75abda6eb1dull,}, + {0xf6ce05fc3becfc23ull,0xe4ac8d257a7bf44eull,0x4c12510765eeaa43ull,0x06c870a377df50e4ull,}, + {0x2f6871bdc1d62dd7ull,0x80591505c1279cb7ull,0x1322088b2719ecd2ull,0x222e71f8f5995a2bull,}, + {0x2d1a1ab198363dfbull,0x35635c96cfa670ceull,0x7d5034dd7a26c656ull,0x003bf0608625abedull,}, + {0x98ca35cf5ed8716cull,0x2265e1237bc6df23ull,0x403b67933e14f23bull,0x17bd2dadc39729fdull,}, + }, + { + {0x73eaf26576b3ee71ull,0x1e385de29d896044ull,0x25a0f40f08a59317ull,0x19849935bfbebeeaull,}, + {0xc124cb642102cadcull,0x15bc408ad6ca7826ull,0x2d7fb7c9392b5314ull,0x191fe8471669f053ull,}, + {0x4519ddbccb6a7c26ull,0xf93bd195baec8228ull,0xacd754a721948defull,0x12f17b60c7e426bdull,}, + {0xcf447b92b04c15dbull,0xfcb7da793167f250ull,0xcbabb4ee570c4306ull,0x190ab94c6e5c81ceull,}, + {0x66edbe6740930cfcull,0x00c8c644983a181full,0xfe9e80b984c44209ull,0x1dd6f530584a7ffaull,}, + {0x14c61214aa1a9038ull,0xc34e5e23426cf8b6ull,0x89fca910ec46ae5full,0x04f1b9161a0f7c1dull,}, + }, + { + {0x60c3a79ac91ab4deull,0x541e051ca71a1a2bull,0x490abafd41a5d25cull,0x126275c8a46cf343ull,}, + {0xe5da0fcfffccd2b6ull,0xe3820301b166bb43ull,0xc6599e01bed6085dull,0x226548dff57c5cfbull,}, + {0x36428b1296882728ull,0xe08312e604299b9aull,0x5a15c889ce55478dull,0x172710198cd7c270ull,}, + {0x2450f291477cc1ddull,0xcb0f85c9d92d1bc3ull,0x86325c11cfe0e296ull,0x13ff03a4bd5be082ull,}, + {0x74298091e426bf55ull,0xbed700b48330ccdfull,0xb1ec45894f74fb11ull,0x1716d956bea958b0ull,}, + {0x91b29e513e9a4114ull,0xcdb3b373910c02fdull,0x268e148f9431fa75ull,0x1288ec8fde3009bdull,}, + }, + { + {0x02ae4c95e0afb8caull,0x8e7aec631cf8f5dfull,0xdfd9373259eca3c3ull,0x1fed34fb88af7224ull,}, + {0xc47e420205b5c88full,0x7207ef7451d1c567ull,0x53262358433f5294ull,0x193248ecf07ad085ull,}, + {0x49de15f9bb694200ull,0xc35f531086b5c242ull,0x95a1903858cd5140ull,0x032a4992511b1f97ull,}, + {0x42ee2c4def1faaa7ull,0xf6ca28bc9d99cd60ull,0x83c60d620a1e004cull,0x024ccf0ba1568759ull,}, + {0x6122291bf42e7d82ull,0x0866090d368a8205ull,0x11f04812ad6ec708ull,0x14cdebecb4ec13edull,}, + {0x535e8fd1ac15390dull,0xb37b579abb1773daull,0xbace0a295cd4b579ull,0x215e20d42270bcb0ull,}, + }, + { + {0x400bdbc644ac1d92ull,0x6d856667d971f595ull,0x03343816a1bd40f7ull,0x0361ad7534821a43ull,}, + {0x824222acf8437091ull,0x79141c3205b1032full,0x6b4d331fc9974530ull,0x1bf965a7ba2bade5ull,}, + {0x0bf66d1afdad6063ull,0xfe6571464fe71527ull,0x3ec25815cc90ab9bull,0x132ca2d9d51c3b56ull,}, + {0x37e3ae17fb5ac815ull,0x2dfedb4efe3f37c0ull,0x4b086ea5032745a4ull,0x0f966cabdd479e9full,}, + 
{0xb5266c229b7ebe0dull,0xc6717a5442929826ull,0xad22a19d8892adf1ull,0x172da87fcc14d4f9ull,}, + {0xae0d9866d891bb59ull,0xc500c36e3fe7d354ull,0xc2b582f2929b23abull,0x11428eb730dd4e8full,}, + }, + { + {0x81538fef8e07dae0ull,0x3c05274665489b39ull,0x36e4401350ceb55bull,0x23822f2029f31339ull,}, + {0x9a946e7c30090ad9ull,0x5bbc4c8c656ea3fdull,0x3cc2cecb7ec7227full,0x075a6fe87014899full,}, + {0x504b2ff7fbb0366bull,0xdbf315791bc3d5e8ull,0x34b16de185c8c4faull,0x0c722a3dffe0761cull,}, + {0xe1b2c1fc3b33c383ull,0xce84d3e5182665f5ull,0xbcedf2f72de4d795ull,0x1a84c62c0c4a6f6full,}, + {0x85ebabd309ae9553ull,0x1330ec03b0ac91f7ull,0x8f42ba9c8c1ae123ull,0x24c230fae89db4b9ull,}, + {0x63ba534e151566b6ull,0x7e44c5bd39e6334full,0x06921595325d11dfull,0x217f3a4e9d6413deull,}, + }, + { + {0x25ac71f16a82e592ull,0x47846dfdcc378ef2ull,0x75c53c75b38260a2ull,0x039b9da33bf9b997ull,}, + {0x53d30cb619b09dfeull,0x566d6a55a184cd91ull,0xa589c53ae28a8e0full,0x13c05b500d5f285eull,}, + {0xd22faf3af0a087b6ull,0xd5e538653ca52380ull,0x42c893b42092e080ull,0x18f7a1bdd3badfbbull,}, + {0xdba4e6c94bb0a0b2ull,0x323d4769578ee4deull,0xbaedb0f8e01fdb15ull,0x21ca037715dcfe00ull,}, + {0xe6ccc0bc06afac14ull,0xfb943c10916b581cull,0x2d5694a4c968aff2ull,0x054a1b209a812e31ull,}, + {0x1983e59a45dcb02cull,0x71dcb184a30af740ull,0xb75b69bd5ae155acull,0x13c7fc9ace199224ull,}, + }, + { + {0xddbd6b95067516b5ull,0x29ca0360372d54e8ull,0x3e2955c1d6748678ull,0x1f8b276aafcd2c7dull,}, + {0x893187796c272ab6ull,0xc843325fc53fa37eull,0xbe658fac833007a3ull,0x04bdf08356fbd83full,}, + {0xa0863d3fd012aa1cull,0xb1b2c2c3c2fa879eull,0x4cd718b80433407dull,0x1e1ff82d0a23f609ull,}, + {0x0c72fdbda5da70b5ull,0xfa2ad5a7dafb202bull,0xa63ce1e889feffefull,0x030b328f5fa93e0full,}, + {0xc4a01585dc609f7eull,0xade61ef3353eda34ull,0xfa884e9a73d65e8eull,0x24750424a4543a02ull,}, + {0x54f07e883bbe27b6ull,0xfb41ed1660623383ull,0xe112647feeae3cabull,0x055cf71a930304b0ull,}, + }, + { + {0xcc5f813b041ba372ull,0x1b892909c069bfd9ull,0xdfac1a47d46ba3dcull,0x1bc553fdedaa97e3ull,}, + {0x623da812c8d71640ull,0x59b3b84486ab96c5ull,0xd77a7d970676d563ull,0x09473f20b0087846ull,}, + {0x9214acc8a6ad6f76ull,0x53e9b1713dffaa0aull,0xe66631ab33f6477cull,0x16792dc3fd2138d9ull,}, + {0x612c9ffc45facb86ull,0xd43cb433555b3da3ull,0xb0ca697731e8202dull,0x141ac2b6bfa546e5ull,}, + {0x51b480946640c6a2ull,0xc71f677b6d96bb2bull,0x7e0377527663c0beull,0x036b2f551e8c7db8ull,}, + {0x09610b7524482b53ull,0x65196312af7438ccull,0x7050f94a8a70305eull,0x06fde0d46e6c059eull,}, + }, + { + {0x707927b8fc061859ull,0xd9e38cc9ebbd94ddull,0x96eba99c855f975full,0x0c12d088d263d28aull,}, + {0xfa236e22ee58216aull,0x470b1efa73ec6699ull,0x4c5457a04dbf7553ull,0x1a1dc4cbd3ccec1aull,}, + {0x9a327665f6db6d31ull,0x6443a4f683536914ull,0x58eff845741ae1d6ull,0x0b784f2a8c259646ull,}, + {0x08cfd913a263ce94ull,0xe58aab8c6b488744ull,0x335fa717116557daull,0x137bf0016a4e4c17ull,}, + {0x0c14566b7ca1106full,0xb5fac75743cf44ddull,0xe87d1d95b95cba63ull,0x1d2823802dac3d01ull,}, + {0x445099d6807bd76cull,0x41b66837529eb51bull,0x84267670e2264913ull,0x0ed84664bb37032eull,}, + }, + { + {0x938964e622d307e8ull,0x2edeb24656039ea6ull,0x642dd6f7e2144be3ull,0x1d31590cb07cb098ull,}, + {0xe57bf1b8729263c1ull,0x48f9b371fd250d79ull,0x670ce0ee36513b90ull,0x1b908986cbfec7f1ull,}, + {0x9fc8ffb876636effull,0xd57385d67c117698ull,0x4813753691eeba7full,0x0e36785e030209eaull,}, + {0xeef1935cb4c5e8f1ull,0x1b8726a75ab06681ull,0xee973c5cd718bf31ull,0x026910b1fafe0208ull,}, + {0x8c1826b08792fd9bull,0x00325e83cb419665ull,0x9cf44c5b81265badull,0x2462a8c0fc4f85f9ull,}, + 
{0xa4068de0bcf85b4cull,0x5292433f89646bedull,0x05b4bdd364d3bc53ull,0x1e25be7fab47bf9aull,}, + }, + { + {0x51c27ca3424bdf72ull,0x167926750fe4d445ull,0x41985a737513c6e8ull,0x070056ab60d56287ull,}, + {0x0a23d1344dfd91a0ull,0x6c518fef27a24e64ull,0x059a8c49360f8730ull,0x0f1d38b2c12772f2ull,}, + {0xaa2a1e60b126566eull,0x1ed2add1bb218007ull,0x71385f0a8fabe78eull,0x024c0880d7c0fd5aull,}, + {0xeef5704923a38ff1ull,0x34506a9872581fa9ull,0x78152bc691cbac5dull,0x0c41086d97a7fccaull,}, + {0xb0c0d854ad72b6b6ull,0xb38455c3e3e5f457ull,0xfe665f1f4ddafb6dull,0x10373cbf9ca2add9ull,}, + {0x8a306e7799aa2605ull,0x5dbca515ad2f9733ull,0x9b8b80da928edeb0ull,0x0052a2d2f8f7b1e2ull,}, + }, + { + {0x13e3e3df198f8864ull,0xc80f05cd02b931f3ull,0x8826debe7162b2f6ull,0x1d319ece62ae45e7ull,}, + {0x313e17d4fa80fd67ull,0x82c5f606bfe97eabull,0x66f092bfa6b46100ull,0x16fde5bd28d86353ull,}, + {0xcd4e7dfcd19cfb45ull,0x026d1e42ed44630bull,0x8d6b54119bc07918ull,0x1eff361145a4818bull,}, + {0xc80d511a9a448566ull,0x9df3e33a28a32065ull,0x5a5860db779cc4aaull,0x1c226a0a4bf8c193ull,}, + {0xfe0fa440138c1ebcull,0xc32c16bd93c71daaull,0x5e053ef1a9d73a8eull,0x2105d2d85afe7c65ull,}, + {0x553c6840e4d14fdfull,0x600506d781612ff5ull,0x3ab288079ba2da8full,0x19b8f14b3e9cefeaull,}, + }, + { + {0x101f9567b577f4ccull,0x9d7dfbbb95010b1eull,0x1801c3f5ef323a26ull,0x08066f8c302be6e0ull,}, + {0x301f867187aa8cc4ull,0xdcb504ccd5deb64bull,0x7a19b8838cf066e1ull,0x1ce06a9c35aa0809ull,}, + {0x010a732bda3f076eull,0xf36ad54eeb0df727ull,0xe7e3ba3699eb12eeull,0x1d65654037809723ull,}, + {0xb8ff82aa0c8f9e89ull,0x39fd76e872772dd1ull,0xd0a9a0cf7b300237ull,0x21cdd8098a877d70ull,}, + {0xfff1cbe2921532d7ull,0xe919f4cbb2b62082ull,0x43858e6488e4d9f3ull,0x227d32cd853e2a11ull,}, + {0xdd7807401672de18ull,0x7e3167a195002069ull,0xef20051461812a1full,0x1ee6ee09899caca3ull,}, + }, + { + {0x18dcb2c8d68bcf3dull,0x55c30335c441d056ull,0xddcda87759df1c4cull,0x0bd72b9d00117407ull,}, + {0x53759bf204dc6ee2ull,0x5a491198ccc07fb6ull,0x21023e765d4b082bull,0x052467582f570a64ull,}, + {0xc71f8479e69bc9efull,0x1a8b07f3a7f9f4e4ull,0x4898f9336938503bull,0x210b416bb55f686dull,}, + {0x2ea76a804a514216ull,0xaed1c043402cba72ull,0x8e96b191c8508968ull,0x0a6845487a544d0cull,}, + {0x20f8a88abe36a0fbull,0xf7be80390c4df565ull,0xb4d6ae73ab0ac7b4ull,0x03dee2bd150d75caull,}, + {0x31f41f54a9d5ba23ull,0x32d8a838645e8303ull,0x1ce68866725d4d63ull,0x16eff9d7d55f24a6ull,}, + }, + { + {0xc9ef98de3048fe85ull,0x91d247303ba2cc5dull,0xfeebf32febfe0c50ull,0x12193bd2dfc7cbaaull,}, + {0x05545cc46d6e2f10ull,0x0c1885bd6a173fe0ull,0x19192206ce77ae4dull,0x21bc567dedda3bcaull,}, + {0x0289985f4f8a3e0eull,0x46a6f360ff57d0beull,0x8ecf6d8914a57a28ull,0x16fad252e99a0f5dull,}, + {0xa1ce7650862f87aaull,0x624601ad20a0a754ull,0x181fa95e1dceca7aull,0x04c7063bf6031512ull,}, + {0x47221f77cb9dead6ull,0x0b0a1f41bf04b7daull,0x1285ec2ea904f669ull,0x05d815fd67d084b4ull,}, + {0x2f4621c7c48ac6bfull,0x6c94a7fc7433ddc8ull,0xbfbc34ad00dc77bdull,0x0d420c22daa0e425ull,}, + }, + { + {0xa125bb06b8f5ae5cull,0xf130e54b42e247baull,0xa7d5d0e59b914ac6ull,0x071f28cba94510baull,}, + {0x23781cfd40419519ull,0x2ea1f31e32e9865dull,0xb81d3422cdc1a049ull,0x09b4ecf31bed5dadull,}, + {0x7cad0528d1f2ffbdull,0x4aac3a0629f7f4f7ull,0xffa90428bf6d62ffull,0x1e313094fa295c2eull,}, + {0xac9d8af47d98869cull,0x8ecebc8bdf6c41e8ull,0x859d29cb97f9f264ull,0x0c9223c674634d76ull,}, + {0x5adcabb24bf08460ull,0xbc91aaa43338b671ull,0x7abcd2f2031ec66dull,0x19b3dbaaf6fb5a1bull,}, + {0x00b0c3d6c69380bbull,0x044a0a413e3aaea9ull,0x48d820b0f17d1ac2ull,0x1745bb82ed277652ull,}, + }, + { + 
{0xd921b459e78504aeull,0x79ef5733fecdb405ull,0x04020f6200148defull,0x1163b626e015e688ull,}, + {0x0781fcc9b627e44bull,0x5d8c6c8944d557a6ull,0x5493d9920c1d32fcull,0x0ecdc7510a2f454aull,}, + {0x7086854c556b47fdull,0x4ec3f6dd8ad274dbull,0x274e92925edf85deull,0x09e6aa987250022full,}, + {0xa63453a7eb3a8fb5ull,0xbd83f1e026f71f82ull,0x1386ec55e6450e3full,0x00873f000047164eull,}, + {0x179dbc93073fcb3aull,0x592c5c9b8baf6411ull,0x4b81a7b27b4d9070ull,0x1d26ead51df9a20eull,}, + {0x6a244f14dc36671cull,0xd1e9d82e3c5bea31ull,0xbd883c1283d17771ull,0x1e09e59618c6163full,}, + }, + { + {0xc1b0578027cdeed9ull,0x7ad19ad5cb04d6e5ull,0xee6f7f36d5ed1465ull,0x01d616ac45e80f5full,}, + {0x2c0c7df57e945feeull,0x9709cf12715b87afull,0xa6e99327a9e2f868ull,0x1dc75e316e45b2aeull,}, + {0xa7bc3e0832276f4bull,0x36ed99677fa22ffaull,0x89da95557e5dd91eull,0x0c1f4bf5d672d3b9ull,}, + {0x25624941c1047a5full,0x463ccb3bd3fce3b1ull,0xd115fc8570096682ull,0x17145e34ff1d0e9aull,}, + {0x4a3a34676a6a378full,0xac89a12198b0ca1cull,0xb97a2d982319e20eull,0x0caf54593dcf42e9ull,}, + {0x7a07a3d321faf4daull,0x6a062e2ec939fd56ull,0xfd7ac47f692009a9ull,0x1121561f1c332cd7ull,}, + }, + { + {0xcfb495c8f564f52cull,0x39665331e96c838bull,0x42c49998a1446b14ull,0x03cc4e294cff3ff7ull,}, + {0xd41d69b42b557d10ull,0x98dab8bd722a39a0ull,0xd4e24c4add54c81aull,0x1344527908d19fa6ull,}, + {0xe9648caa7c8da128ull,0x8497aa165fdee967ull,0xf437d75fab691b76ull,0x052cbd6eb6436a4bull,}, + {0x389f7092e280920bull,0x9b8625c09555310bull,0xe91f49f9d9031898ull,0x1c95a9d881b18be8ull,}, + {0xe8605b4d2212b1fbull,0xb1c4f57736dbf0c3ull,0x8a90c4bcc09cad9eull,0x12f03ba47d2620d4ull,}, + {0xcbd4494a5830ba3cull,0xb5a5d7b6b635fb6dull,0x154076781060c57aull,0x14e27241d5bdbe5dull,}, + }, + { + {0x5545df3af64ec9c4ull,0xff2adbc37d224acdull,0xcf02fc8672ce69ffull,0x0a7fcfe0b85478f0ull,}, + {0x402246e5d134054cull,0x0bd5980440304ad7ull,0x3df09979193914b6ull,0x22610927d3977e51ull,}, + {0x08235659dbd58c8full,0xd159c4e705d2f6d9ull,0x3c5ae22b53836251ull,0x137039c4b43f1c9dull,}, + {0x4ee6c2b196d188bbull,0x54ecda987459243eull,0xb3a9cfbf1aea2748ull,0x234243a4a87cf61eull,}, + {0x248eec552d9a5ef7ull,0xc8a98bee264e9e26ull,0xf3bcd8c268d0c073ull,0x16e365499a23e913ull,}, + {0xbb406c86a8f7f2d7ull,0x03426cc36d053972ull,0x047915ec9f472c4dull,0x2318c0030bfcee73ull,}, + }, + { + {0x3c783caa5308c82dull,0x81bcacdec8f45662ull,0xe169822ce2c0837cull,0x09c179836e05b980ull,}, + {0xf5d882cd842d337full,0x861761db32052e52ull,0xd6721854e7e686f2ull,0x0d22ec35de13a291ull,}, + {0xd9dd477876f2c6d0ull,0x5ef6dd9d6e4eb6b3ull,0xa22e8bf49d19a102ull,0x1fb12cb296762e6aull,}, + {0x8372df5211227b55ull,0xc3994286779c5c02ull,0xa302f7b3be87ac5bull,0x22b842b9b918d821ull,}, + {0x2cb75b8cb17911a1ull,0x5cd8f56c7f4dacf8ull,0x09874f95dd87d8d6ull,0x15b92554f1bdb068ull,}, + {0x4786ec1f88a80264ull,0x91dc53364f6aec54ull,0xbd9bd414e46eb290ull,0x1b27b7fd99d5e212ull,}, + }, + { + {0xbb40271789b4bb9cull,0xddf3b8f645386314ull,0xce090cc4ffeabe23ull,0x0c3920ea76b361f4ull,}, + {0x14c64e1eed2b5edeull,0x99c5289af2511b43ull,0x5de1d7b1dccb2575ull,0x0b5e4419ad2e1c52ull,}, + {0x0c04995f7bb764c0ull,0xbd9eb56e1c742072ull,0x9009271bd281dfd1ull,0x2464821365b75205ull,}, + {0x49724e13fe376d0cull,0x189fb55cbe1abfc2ull,0x80162bfa5b8980d5ull,0x1a96550a3916c5caull,}, + {0xcd79e4d9633065d2ull,0x2b51887668a49a0aull,0x8785b375ac581035ull,0x10a5547822c082bfull,}, + {0xb98da2585b65ccd3ull,0xa8015a03bee86a26ull,0x2eb6a1e1bd1cdf1bull,0x07bf364897d1c8b8ull,}, + }, + { + {0xb791c26545931abcull,0x9a1ad86e4fda79aeull,0x06855828328d0314ull,0x116650fafca899dcull,}, + 
{0x28a52543d8cb599cull,0xbdd390c86fa4fb40ull,0x903fff92c56629c6ull,0x0b496e3e73b93100ull,}, + {0x0f5622574884b369ull,0x48dc4ad8ee6e6c07ull,0x9bf8705b75932345ull,0x12fdae5ddc53fccbull,}, + {0xffbab25f3f4dbcc5ull,0x2e29054e3b0c795bull,0x4e42d9554507c4a9ull,0x0100c6ddccafa66full,}, + {0xd070c555e094dddeull,0xc33dd5eda3c03e59ull,0xaf83e343a270dd9aull,0x098aee3da1fa8162ull,}, + {0xad02918dc6d1048aull,0xf04903a09f8c1e95ull,0x51622aaf4848d918ull,0x1ded54a06c3901a3ull,}, + }, + { + {0x407e49d022ba5897ull,0xdb8d26843eab7b0full,0xf976a1b95413e184ull,0x0aec3abccfa3f870ull,}, + {0x5a796987e2623f7bull,0xf9ab67105d5e1b46ull,0x9d9d00cfaddf51aeull,0x1be8e30f8202ab70ull,}, + {0x793be4982c00e681ull,0x903759a9286f8a57ull,0x16a3daf170f851afull,0x13cf0c29956077fdull,}, + {0xfb5787f1092904dcull,0x9a7422c14149238aull,0xe8e61be7e9ad1fc9ull,0x10029d3e967eff2full,}, + {0x4a4887f39a050b1bull,0x2b7f2e2d718b7fa5ull,0xdcf39f9d5e4ccc87ull,0x0e9ae22b93f3c46cull,}, + {0xe2085144d647649full,0xbb22757ff04f1a8dull,0x39c323e34631d9f7ull,0x04865b0a1462c9b9ull,}, + }, + { + {0x684266fdd1482bdbull,0x49a7895fd6b87933ull,0x28476e848c17b925ull,0x19e95e89691c4ea5ull,}, + {0xe9a6a6bccaf53a2dull,0x479cccded58ddaccull,0x16049a3fd6291256ull,0x07364abc39086c40ull,}, + {0xf24da0fc6d7e4b82ull,0x29591202c08178e9ull,0xf9b5dff7dc07aae1ull,0x0ed06afda0a02f78ull,}, + {0xcac1c41fcc1f702cull,0x52b029719b5224f2ull,0xc838b665539d0364ull,0x246b61674cf835aaull,}, + {0x44068b26b9dce8e0ull,0x6b3a0b0e83a7c8b9ull,0x03feca47fb021110ull,0x10d9d6e7fbc944eaull,}, + {0x3a39ad7da63fd6fcull,0xaf3e9dde8885823full,0x31511af0a15648cfull,0x19de25d493f0200aull,}, + }, + { + {0xd4fff38e62012c13ull,0xae59ef30122850ffull,0x9d23a0381a012cf6ull,0x120ae1d814828c1full,}, + {0x42eb1c5dfbf07103ull,0xd254f031490046f0ull,0xb47882ae239b8ae8ull,0x11158120470a13baull,}, + {0xd5144f9267a09051ull,0x66da90aae84bab57ull,0x586fcfe6e1dfc445ull,0x221e49ed2a16e941ull,}, + {0xf467fe034d6cbdccull,0x7ac29c1d1e5e20feull,0xa110e6e05eb1585aull,0x23d954fcdf786a64ull,}, + {0xc1ae9be330026938ull,0x874b19ab11339205ull,0x0964cbafa59f62aeull,0x1e6167f38349f253ull,}, + {0x23efb445bd9ef627ull,0x897335bf70b7bcaeull,0xa00f86ae69e47650ull,0x2509e8fa87d5670bull,}, + }, + { + {0x22a00ec33abc6b8eull,0x09620addb21d394full,0xb965fdcb7ee143dfull,0x1febe6994e628a7bull,}, + {0x1c710a901e98b013ull,0x2801fd688f4dddf6ull,0x0abcab0ebadf8343ull,0x10f0cfd199338d92ull,}, + {0xd599e818b6e83ff6ull,0xb88539365c679f3eull,0x0313ce19b529a51dull,0x21f5f0b9f1cf3415ull,}, + {0xb59034f3ef13e954ull,0x6883ab623a40da9dull,0x94faebf81576de70ull,0x14d2247af37a0cceull,}, + {0x99757d5184162b77ull,0xf79b9dc74871c5dbull,0x608ad4501b03300bull,0x074149d915458798ull,}, + {0xa3252b36c3eda717ull,0xc1ded9f245002540ull,0x14b5755b56dac7b3ull,0x19308239f6756bf4ull,}, + }, + { + {0x07f4f5a6f26b067eull,0x32d2eb865477dbdfull,0x6945cbc86ac200a0ull,0x1e6311fd6ef61d2bull,}, + {0xa0d0920425c68e5cull,0x683d1987c8fe9e5aull,0xd7228b5e41a381faull,0x114a05f6a9f409b5ull,}, + {0xf677d47e68eeea17ull,0x87f50243b30d3112ull,0x084cf054770d8dc4ull,0x0bc9fe9990a74fb5ull,}, + {0xf22bdc5dc2eec0d2ull,0x3bae3de98c595ff4ull,0xc95e53073fd0b23bull,0x11a7e2b2d55a6ea2ull,}, + {0x8ddcbdbb83b870baull,0x728950ad96866c71ull,0xd145c1d31fae9c5cull,0x0547d0e831e70104ull,}, + {0xead79bef2b2433d9ull,0x0647d5966623bf56ull,0x4fb0056ba69d7958ull,0x1a0983813c5d2e9eull,}, + }, + { + {0x215a5a20e15d19d2ull,0xae9ceafe33084b69ull,0x80f85025ca380f77ull,0x1c19066c196d1a00ull,}, + {0x359cfc6bc545de2full,0x7339f8704a758d60ull,0x64eca98cd5f2d7edull,0x248ba44255247839ull,}, + 
{0xc2c6e70b389e8492ull,0xc9b97f7a19d874c9ull,0x87d7b9a332957727ull,0x0119950fe431afe3ull,}, + {0x51eeee98aaf4581cull,0x081de6981f8512e1ull,0x4bb18cf097ac6997ull,0x21e465b23c21951bull,}, + {0xe5bc584a9a1f5a1aull,0x1ccc4b14286b7ad9ull,0x435b382aeb470e64ull,0x1f9ae9143c5b987bull,}, + {0x990eccb3248cd3d9ull,0xe6cfbcdbd8c8fd0bull,0xb48de18c5009802full,0x198d98c5412a6213ull,}, + }, + { + {0x43cd5d8c9073ea61ull,0x5174db54059acdffull,0x45e871c04aa7a2ddull,0x05e16d3199d840a0ull,}, + {0x9ad1091f764df938ull,0x67637f20a74490b7ull,0xdbd73b8487d04861ull,0x15a139abaa8b478eull,}, + {0x1b10547972b4d507ull,0xf641d3763db1a739ull,0x15597787c5b84ec3ull,0x0134b78ebf335c12ull,}, + {0xf6b7a9d4052963daull,0x2d806855d9466783ull,0x623658a8a2d743dcull,0x00de0208fc0298b1ull,}, + {0x1b67ee84e8c40714ull,0x620107f4c2393038ull,0x96441ca3a07baeeeull,0x0b27368271b0f683ull,}, + {0xa65922c66ed876ebull,0xdc21179aa8971bdbull,0x9309a00b5206e041ull,0x088fc38497bf88ebull,}, + }, + { + {0xee8bf43d2fc34584ull,0x4ff6772e8da82b6bull,0xa7ae3c97dc955a78ull,0x09651f34f9ad7ab5ull,}, + {0x103de2e1906f8fd3ull,0x046ca4e6b276642full,0x220398cd397af5fdull,0x07b984811b0df962ull,}, + {0xd0519e42b872b7aaull,0x164acb4f7d9df94dull,0x54cd157448c94337ull,0x04c636efd3f59641ull,}, + {0x7cf41f52f0acc90eull,0x54dff80755d46315ull,0x83a7e3f528daec19ull,0x0039b02577bb91e6ull,}, + {0x828eb12b537a9732ull,0xd81ce0f79c6211ccull,0xcd2fd2f2e35379adull,0x1e84fa2068841dd3ull,}, + {0x931aef70f9a3a06dull,0x71abc5af88fa12caull,0xa70ddb3102a75247ull,0x14a049c881169cceull,}, + }, + { + {0xa9975bec6d3f0412ull,0x72feab9fdc81092full,0x49f533cdb7ae9d66ull,0x18632a2c4c5b4d2dull,}, + {0xaa9f81eeb706ca09ull,0xb1065065a3fe5198ull,0x3381765974ac94a8ull,0x0ec5d52c65b1f5e0ull,}, + {0xfe465050a5cd7ab1ull,0x5059fae63d47120aull,0x49ad1fd731ef0aebull,0x1e018673e33f45e5ull,}, + {0x6eebdeb52c24d248ull,0xa43988a55ccc8d10ull,0xe997fafe55d0ff64ull,0x233675abd5ad14e6ull,}, + {0x8b5530b175fbeaadull,0x27ba08984164ed08ull,0x94a9507d0189809dull,0x12fb832d1d13901cull,}, + {0x912ff6e6cf0c29f4ull,0x54d7a43121bcd1afull,0xcdf9fb448a1e2185ull,0x02aac1a8e253b8f9ull,}, + }, + { + {0x26a581d7ca270a84ull,0x989bddaaecea533cull,0xda7993327a4b8cddull,0x0c1637ca7d045160ull,}, + {0x6213cd9db7a6d076ull,0xc03037d124aded7bull,0x32d9e1bd41523d2bull,0x008ea641abbe75edull,}, + {0x7d3c23b227774f03ull,0x4a5e7805e6f9a14dull,0x1c24f1a43d487e79ull,0x18eafaffc703509bull,}, + {0xe146113f559bd9efull,0xe56825b1a7fcf7f5ull,0xa93689399f819fceull,0x14fa96013c5a6638ull,}, + {0x81c625bff8857fe7ull,0xc98edd68e7203a68ull,0xc88c3a681a3f1ac1ull,0x0bd4fa57e9b6d9f4ull,}, + {0x2dd6eb21127b1fefull,0x91b039a57e0f6233ull,0xd02548bc3dc3c783ull,0x0e8a4d19a777a688ull,}, + }, + { + {0x025c54533652a519ull,0xb3bcbf01559e8920ull,0x5c53eb97c55f25fbull,0x22322b9402949dccull,}, + {0x260ef92c70dd5c11ull,0x9e27626b6cd441acull,0xc6661507ed6f5d61ull,0x0fac1fb2f6bb53edull,}, + {0x5511ab3bd7ea4c51ull,0x6562a46409240916ull,0x83a5e441731b870dull,0x205c0c853ef83501ull,}, + {0x7c8ae57f4deec828ull,0x349dd08555bea497ull,0xcb5d3234c7b839bdull,0x153259da7d31993eull,}, + {0x964b508f6fa5bb3full,0x82b5262f18242750ull,0x970156d1896d43c2ull,0x028fc28439e44783ull,}, + {0xda5afd0f1a7d7fcaull,0xddb473f9a75a7a4cull,0x180c169ed34f6781ull,0x0cde138f3279be8bull,}, + }, + { + {0x63de6da225c321ddull,0x4832886b582d3833ull,0xb0dee708e55cb53bull,0x06c9e933c223ec30ull,}, + {0xdab1fab5dd78e263ull,0x3e658d3d9ec3bb7full,0x3d0a56ca4a1b088cull,0x008ce74540e8386dull,}, + {0x0b0ee452fc9bca4bull,0xfd0b0e032d16b266ull,0xfaeea7076b32cc91ull,0x1823f6048f88ea5cull,}, + 
{0x3966dc6553a5ff08ull,0x85192338024e75e5ull,0xff2cc296f92beee4ull,0x229caca8d4f809ffull,}, + {0x7702729e0d1f5157ull,0x1a3ac2432384d0bcull,0xd006954b39b11e9cull,0x118a5126dec2a2faull,}, + {0x2e9bfe6eaf026413ull,0xc720a61aef11d653ull,0x6ea67c87c36691a3ull,0x18f925014f9c61d4ull,}, + }, + { + {0xd3b27621ad1dd1abull,0xf97b0f55f22f18c9ull,0xb6113e8be6db1114ull,0x1a8a1ae8f65ead1aull,}, + {0x0007a32980115669ull,0x605196cb02f760a8ull,0xfbd2085c8671df43ull,0x0c381e59ea5960d2ull,}, + {0x94116d83a9603b67ull,0x92b23f61ccedfbbcull,0x50e0fc7e78727f5eull,0x23fc01a1d8cc7e65ull,}, + {0xd1b8a0d5024aff36ull,0x2b25d1cf4ab60e92ull,0x8dbbaf91e20c91fbull,0x185a985f30c061fcull,}, + {0x06fe112b333faa7aull,0x9323dbd6f08549bfull,0xcf5e43f668844df0ull,0x10df0c27f29e1637ull,}, + {0xf2afbd9928527e7dull,0xd856c6d7448b34ddull,0xc5e025621b375c86ull,0x01b0fe70c9b177dcull,}, + }, + { + {0xf09e65fdda5bf41cull,0x59ef2a8eb45985f0ull,0xfec4facae20ae75full,0x019f623d519953a8ull,}, + {0xd5dc50c38c7e165eull,0x62fc39995a53fcf4ull,0x557a7e55f3ae1284ull,0x0fde40ac729d9ca2ull,}, + {0x4b49ba1f5fcea25aull,0x631dbbd1d4e3cea5ull,0x7069fcd00919239full,0x09c559fb76aa0dbcull,}, + {0xbb6348d2d3a8d733ull,0x460c7255ba85e5c1ull,0x42e7d9808787c01cull,0x22c0fd2eef2261e2ull,}, + {0x19833887b93cc3abull,0x2cee6551569164daull,0x1c44fdcd7b0c79dbull,0x1807ed58575a7b33ull,}, + {0x30713388923e3b7eull,0x6d541ffc75d914c7ull,0xbbb50245851f0f6eull,0x1df0abdb9048edc2ull,}, + }, + { + {0x62788c325d2b0f0bull,0x33744819eb512733ull,0x83ff060d6ff7309cull,0x18829912bda99968ull,}, + {0xe09edb24cdbdfc6bull,0x099200c5850fc442ull,0x967790a56049a66bull,0x011cd382712b1d77ull,}, + {0x8df4e975f64427d7ull,0x2e3901a3a7b0f55dull,0x641ec6f45805e402ull,0x06e1d0db4022cd43ull,}, + {0x440dbd8590564164ull,0x6aa7d9c34c053da4ull,0xe0da2752be2f5aaaull,0x2264f00ad93d3d4aull,}, + {0x716e5f9a7e68031full,0x1bcb15653094bebaull,0xf84ac39bc138e963ull,0x1d7a1fc06adf5b63ull,}, + {0x8835962eb2e3079dull,0xc3d7c9d41261e319ull,0x30c0c53b9353bf58ull,0x03bf957dd1541c99ull,}, + }, + { + {0xe77e8891944694ccull,0x04efd57869ed85ceull,0xe9de08ffa6a88729ull,0x1d062265f1d299d3ull,}, + {0x387dab533dc83cc8ull,0xf7fa09c0bbdf31b7ull,0x59b84e1a3762d3b9ull,0x01b32660eab7f6beull,}, + {0xf7daf1d596d17df2ull,0xcd931e51341e0ebbull,0x51710bb172705525ull,0x244d6b81dbc7d273ull,}, + {0xe7a144e6eefd2dc8ull,0xf5c76e992d995cabull,0x477afe1374a66f3cull,0x1aebe5717b54fe53ull,}, + {0x541a0d7dc825c3b1ull,0x93a0cab475598133ull,0x096efa1eb12a99feull,0x17a85ece29f273fbull,}, + {0xa36f4f86b5bc5c1bull,0x1b4a0fc57947e76bull,0xaf302e3f7838388eull,0x06aadb4991feff1full,}, + }, + { + {0xd6afd4710167605eull,0x1897263cb81c98e1ull,0x90e133c23eb0207eull,0x0718643da3a96ba2ull,}, + {0x8344e521afad71f8ull,0x66af04f81ad9f156ull,0x5ecd25d48f404733ull,0x0234ffcdbb42d141ull,}, + {0x8a50c65ef686166dull,0x34cdda95694e0cacull,0xa8add01d08d2dbaaull,0x1ce98a7c6ceb5696ull,}, + {0xb1702710fa0af484ull,0xe30a4eb2f39aa3f1ull,0x7409d5afcd96441eull,0x1e0168166b2894d7ull,}, + {0x8cfa29792abed76aull,0x75d7bfbcee2073efull,0x7c0372e7080fdaedull,0x1ee8cc19eb967336ull,}, + {0x2a265f9eb8f2265eull,0x48f9b13b07b728f5ull,0x7b915e1225774e84ull,0x0d4eff23e23d5ae3ull,}, + }, + { + {0x13cc952b1ef56e58ull,0xeb3870335e75a7c9ull,0x2fe15087e3c0845bull,0x1011a2007bc71f04ull,}, + {0x472e18f407707bbbull,0x053d1dd70cceea98ull,0xe200cdc8798603d2ull,0x0bddb233bffdfc1aull,}, + {0xec920181b8484410ull,0xc6b9a9b74e18f513ull,0x84c1695c77cf9fc1ull,0x01005eda69cae7ceull,}, + {0x7c668bd94e95d9f5ull,0xbaf12b0a06fcd749ull,0x674b2e2824d6029aull,0x23c9d63fdca6307aull,}, + 
{0x92bd96dd3a545dceull,0xccb9355edd49cadcull,0xf49ca3d068b74eb3ull,0x1d9461936f823b86ull,}, + {0x6a2fa39fa7e93bb3ull,0x468fac8c8f151f41ull,0xd12e0aec4bb21bbeull,0x2326bbeb4405b3ebull,}, + }, + { + {0x1e029295309f1347ull,0x6589babde3a80cdbull,0x74de96ccf73da639ull,0x125810442f8c9fbaull,}, + {0x47d63700da3a6cefull,0x59c3fd0f2b9b6f35ull,0x66f1979c84873b7eull,0x02770c35ac617c99ull,}, + {0xa757e064e4f9edb2ull,0x46eb13ddfbda28f5ull,0x519177520a694aabull,0x04f6097d775debf9ull,}, + {0x072be9865dd6841dull,0x4d9d5c0fa6d6a7b1ull,0x1749ea911a952c21ull,0x15e98445e982607eull,}, + {0x6fb1b6845ce93f6dull,0x52d5387b1a0f8405ull,0xd6a11cff22d72a42ull,0x2283db33f8496ec9ull,}, + {0x77bae4ccdf2e5bf6ull,0x21812c170f736a30ull,0x5a8477a3203036fbull,0x1e667d8ca4a419f4ull,}, + }, + { + {0xfc925115198c93d4ull,0x0aebd45cf3b16db7ull,0x2f7c3d2ab0f16732ull,0x1c4b48273365c9bcull,}, + {0x2a26617f1f00e47full,0x828f68381a20ae68ull,0x0221e65b7f01b6e8ull,0x19e45e14ca4e5650ull,}, + {0x231de599fda4c7e2ull,0x55e6d0d3df2457abull,0x34f961f715fddd4aull,0x0e97e5f5fbfe6aecull,}, + {0x8f1f1a8b1b687949ull,0xbcbdae7ed35524edull,0xd7c78090035aa0b8ull,0x19f2a0d7fb844166ull,}, + {0xc397557bba8fe6a4ull,0x366daf415604f8f6ull,0xa9b99d86ac93e705ull,0x21fb72d548929de6ull,}, + {0x6a2ff9d0392aedf0ull,0xb0a90a0d10fb8fb2ull,0x5ef8e1768350ba26ull,0x24aca64027557318ull,}, + }, + { + {0x18e3eeb6b8937690ull,0x7c87ee4ffda9eb41ull,0x59d0d9e9eb070efdull,0x10b64beb52f348f5ull,}, + {0x60cb09b15da28d99ull,0xde4b5aaff3981423ull,0x7429b4169dfddfb9ull,0x199eb1a7a6de0f9full,}, + {0x450661858d54325eull,0x338439f5a896f88cull,0x9d41086dd111bec0ull,0x146d0b19b0b567ddull,}, + {0x93a470115d0544ceull,0xdbec88b263d6ba96ull,0x4162857e9d97ef77ull,0x07a4e45e194880aaull,}, + {0x7279bdde87e7ecb8ull,0xbfcc34d54c72df15ull,0x57d3ff1a2476f6c9ull,0x0f0da2351d32d405ull,}, + {0xffee1be1efc73104ull,0xb873a987a8076cb4ull,0xce026a94aa6b71f0ull,0x15d4bd558bf59554ull,}, + }, + { + {0xae631a8d76bd7f86ull,0x7e7d9176acbc845eull,0xea421fd87eb8808aull,0x20aaae552a029015ull,}, + {0x5c1c015cfce07393ull,0xc678b97a85aea9b0ull,0x1eea5259304f0a23ull,0x1464e4d058ceb8caull,}, + {0xc65d3f2d4e51915cull,0xeedd92d9fe368d68ull,0xc8df47e3a123fc9eull,0x0a40dfad54ccd6aaull,}, + {0x09a262e9428a05f8ull,0xa0510048ec69ab80ull,0x335a295aecb01ddbull,0x05d9e955d5b1a89full,}, + {0x5eb68ea11c52c37aull,0xe444556824dd8a88ull,0x8e380018a6aeef10ull,0x0442ce4eda39623dull,}, + {0xa77e431b883ec5b0ull,0xac34fb82921e9c20ull,0xa8cfc2d08ef8cfc0ull,0x24ae732a4db3bb4full,}, + }, + { + {0xd5563857f984777bull,0x538e5c618a4be3c1ull,0x5f8eff3fbeab5a7eull,0x017bdafb790e0102ull,}, + {0x6a62e076dc44c251ull,0xd4743cd8eb4cb3dfull,0x98f0d5617f07650full,0x0ef52eb4c0151010ull,}, + {0x516284d618713c13ull,0xe651d8c5769b47dfull,0x27fb0f16b90bfbdaull,0x10e729bd4403fe24ull,}, + {0x7770b670be42c842ull,0x6a9d9db10a3626b9ull,0x17676416c44a62ebull,0x2155a03fd59945caull,}, + {0xcd58941a2ba1e208ull,0x2d5e3caf14827df1ull,0x6e8dbafadc4e1635ull,0x03bbd3e6d397465aull,}, + {0x451703d643a411bbull,0xcca0c1d97355c175ull,0xc5074f56618aa2f1ull,0x04c8acdd37ef602full,}, + }, + { + {0x3f7e0caeff75a1d9ull,0x1b753ba68a2b8451ull,0xf46aeda408dbf4f5ull,0x11652b99c4365b3full,}, + {0x3f8bf5f03132d146ull,0x0b527b11a12d2424ull,0xd587034aa3632352ull,0x13ffef8175d1a563ull,}, + {0x2a30747e4ac8eeaaull,0x0aea36171552eed3ull,0x04e341313ec7b422ull,0x1fb62ea6d5e86357ull,}, + {0x13c69094d2dcc5aaull,0x54573685ddc44032ull,0xd95abdd392375f10ull,0x13a501913c2f1d0full,}, + {0x343cc1b0318577b8ull,0x98776ba96045eb10ull,0x5492dba5b5936d5dull,0x1d1bb567d6a602e6ull,}, + 
{0xccf58e05f8b305bdull,0x3fee26e8419548ceull,0x62c64af67fc27dc8ull,0x08456a814b2fe18bull,}, + }, + { + {0x47f8ccf69457895aull,0x66d08f143ca062fdull,0x8f0df2e2a97b4518ull,0x0cac6d2b34b243d6ull,}, + {0x758f56a94a45e6beull,0x63ed30c20cf6721cull,0x20e942550629c9ccull,0x167acfffb8203274ull,}, + {0x8e727dabacc57eb3ull,0xa2f85144ebbe15f3ull,0x7fc17e7a0a6a4291ull,0x1793c43f349e48b8ull,}, + {0xed2f91d056a5c2d3ull,0x30433d773122e8ddull,0x2c3fef6399c4f9deull,0x099b39a0e3e524f2ull,}, + {0x4cddac568a4b563cull,0xdcd1c44d3983138dull,0x2f421d9f8d71a88aull,0x01a02cb6459cdb12ull,}, + {0x68c09ced7ae8977dull,0x76cb2bf3a933cdaeull,0x6390cd95c4f85d40ull,0x1cad79870e6b2c2cull,}, + }, + { + {0xfd754584dcb80db2ull,0xb73ea36e2df2b8c0ull,0x3ca5645bffb60c04ull,0x1280d1e1f4dd4da6ull,}, + {0x75a069b69ae4403aull,0xbbf6c5ded1f82c60ull,0x34919f2295d7b5b4ull,0x1f7bc94e3a96507bull,}, + {0x9255ca27cb288f9dull,0x760719cfb400f56full,0x291bfbf807781368ull,0x15fa25b272fee67eull,}, + {0x6054f038190f5f6cull,0xe0978a57792a09bdull,0x1ed22ba69556fe50ull,0x20ba270b20baf856ull,}, + {0x55de530a1af249d0ull,0x249e57b2414ceb2cull,0xd98bdcde7f16edfcull,0x0ee1bfb7da744ae4ull,}, + {0x01b24c4d0bb96ddfull,0x32239e98244d75f0ull,0x20dc68759c157d45ull,0x0120769b781bc14eull,}, + }, + { + {0x4f93886e58c4695full,0x85d6a1914aba1d04ull,0x65bb00f8cf495806ull,0x22a2413c698ae97aull,}, + {0x5e7928222bb02f69ull,0x93a92c850ce1dfb0ull,0xab3eda670f968b1aull,0x1d80886e0fba63ffull,}, + {0x672372572dbdeb59ull,0xba4cd6dd6cb11489ull,0xc74f1c6e3b714d1bull,0x1680ad98da380987ull,}, + {0xbad24d644fd9ab88ull,0x5c817abf11d3ce46ull,0x50587e12664ad6ebull,0x13505c240ec7b092ull,}, + {0x69ade81d2b6d1284ull,0xdd1d9aacd53d3f77ull,0x0888b2de31545a07ull,0x110788f6944c78e4ull,}, + {0x81032f6ea72116caull,0xfcb0253b20bea779ull,0x3d0a38d424eba36eull,0x07bdfcb51526c1e5ull,}, + }, + { + {0xebb80cf2cf44bfbeull,0xb8d559e318097038ull,0x212ed4c3d148be8eull,0x07028dcc862fbbb7ull,}, + {0x91e0a395d89f04d4ull,0xf777ae0142ff07c1ull,0x546b9b47f738fa6eull,0x01c284ef516920c6ull,}, + {0x2042edb5a4eb2cdcull,0xc69cefe0a36a7068ull,0x54471d65b3238311ull,0x077562b3344b4304ull,}, + {0xdb85089b11ece88dull,0x5c27780550f90569ull,0xb9607c12434a6b3dull,0x0d02a6324718f932ull,}, + {0x22ef9b5c8b453c5dull,0x6fdc3875e9247830ull,0x20e375065f9e593aull,0x2351c044ce0d933aull,}, + {0xfa0fcb482093eacbull,0xf8d695e8413f5acdull,0xc7020d8c84a2d773ull,0x11bf7584e5283fa1ull,}, + }, + { + {0xc6b304aa2adf2dfcull,0x19aac2d5544ee834ull,0xb7966f8cd629c330ull,0x1bc72a08a8bf8f9bull,}, + {0x18a5f463799112c7ull,0x4f14db51e967ebc3ull,0xa5ddb48f64db5e8eull,0x15b4fdd8610f3a32ull,}, + {0xe7b86b479d7e2293ull,0x931034487abf490dull,0x8c40ab7dfd28a196ull,0x1d981d3918fdc3b5ull,}, + {0x00797000c2afd324ull,0xf2954f0f86622806ull,0x8464fe0995cd3a7dull,0x0f0a74df4ca00cc3ull,}, + {0x639707b1839c8330ull,0x9c8d491ad7d779a9ull,0x576b7e0f24ce5f46ull,0x21fbdcc42ccd04c2ull,}, + {0x4578db4bdfd55434ull,0x1126933c97e9f4dcull,0xe64529a8921d7415ull,0x12e48bab87ea1fe3ull,}, + }, + { + {0x3f6d2fd04bd5ed75ull,0x65e464cdac7d235bull,0x45903a63a3608961ull,0x1f60c825bccd55c9ull,}, + {0x36b33d0fb8528047ull,0xc8d1f1ad82683baeull,0x78f4b80065c2e4c6ull,0x2066f32874bd1228ull,}, + {0x8b6d6a4b986e8d4cull,0x58f6f275f1d020f4ull,0xe4f3c16209e87ad5ull,0x1cdc33d41ad30173ull,}, + {0x9ec18a6cba3fb3ecull,0x31fc74b68ac834c6ull,0x256788ece76e37b0ull,0x13de6919841928e1ull,}, + {0xae46aa08773971f6ull,0xacd04d9698d47643ull,0x3667178a594f2153ull,0x19a0cadfa3cb7fa0ull,}, + {0x228420456325e079ull,0x3e4ec53c418fdae9ull,0xb9fee919e867c6f1ull,0x2272413f3e989842ull,}, + }, + { + 
{0x6420ee94e7c764dcull,0x87b3c986d488deecull,0x11dc3e6b59de7ffbull,0x14bb613bce5792e2ull,}, + {0xcc0b60cd4e352976ull,0x794b585f70a5b463ull,0x415cb954036ba631ull,0x1e521f8201ca4258ull,}, + {0xd707ac91ecd5dbdaull,0x08ffd44e5fd83cc6ull,0xa5f39e0f8dff5afcull,0x02315f6a55599212ull,}, + {0x2cdbd9f11596e797ull,0x7c560adedcf2cb25ull,0xdc474409e5650d9dull,0x158bc955e7e492e2ull,}, + {0xd6023b14352a1766ull,0xd5c271d942b6541dull,0x5dc4d1c72d25258full,0x0753f065a4cb028eull,}, + {0x11b4229a4c62010aull,0x2949cb6b089b3aa9ull,0x01b8bdc50766366dull,0x1094dfda1e2e5e57ull,}, + }, + { + {0x773cc6e1ac12f73eull,0x77686f8d75a83e9eull,0x7ce94b7ef1bd53a0ull,0x005a7d3e75c16332ull,}, + {0xafdc64df2ceca388ull,0x15be551bbca0e367ull,0x62d9b7608cf3b8a2ull,0x11ddfe7a0a96af25ull,}, + {0x5d23851a77554f67ull,0xa0f51815094e8050ull,0x930af7569c7850d7ull,0x108eb034eeda1460ull,}, + {0x28a80b277688cae3ull,0xd09ef5d30ec9b193ull,0xb6c554e32540d421ull,0x1da12923355fd2faull,}, + {0x9db6509d0130494dull,0xe28936417c250459ull,0xde8b4491aa8d1dc1ull,0x194b8e7bfc005322ull,}, + {0x7aaeb4f2f941741bull,0xf9d7b55b452158f8ull,0x17e172a187f68105ull,0x02f620bde277950aull,}, + }, + { + {0xf555a7766ac21481ull,0x82b12050c9449770ull,0x7bd16da27eff49fcull,0x06d1ad9a6cd69b71ull,}, + {0xa059542aa0f64e9full,0x93671f16b269a351ull,0x795262fddcb7cc3eull,0x199f355d6263cf86ull,}, + {0x0cbf707f1f8f73aeull,0xf483501e15982b44ull,0x2456aaa4d84d80c0ull,0x0d0ffb5393f7dd0aull,}, + {0x62999996c09097e2ull,0x1b87e828f9fc66e4ull,0x6b17eb3166967f57ull,0x1603601303478f52ull,}, + {0xfb776d4fd407d485ull,0xac03efdb746bf127ull,0x57bde58a5671a601ull,0x0cfbfa20d141f05cull,}, + {0x625ac1161752cbe2ull,0xe3348570b6ad71bcull,0x155b3911f5335f75ull,0x1679ec68122edc64ull,}, + }, + { + {0x9334b4c82aee3ef8ull,0x7ea393af9d865ce4ull,0x0f4ee0906b864850ull,0x1d9e34461e27cc61ull,}, + {0x921b1a6aa179a081ull,0xcca25db2d609388dull,0x816b69ad9a56a314ull,0x00eb3f6388c4d375ull,}, + {0x04e25f4225e50e72ull,0x59a20b6edf897f2aull,0x0842d5f5823535b4ull,0x0dceaf5ae8e50885ull,}, + {0xac6598257175aa0aull,0x1d5d21e8129f2efaull,0xe81dcc9497cb17fdull,0x11327c40c92dff80ull,}, + {0x149e4b2c0a3bfd81ull,0xb8efe68c475436ebull,0x3a8bf06e9ca15cd8ull,0x152d72639c6e5308ull,}, + {0x217e0e34f3f76b8bull,0x5c722d926b596985ull,0x45417905be08807bull,0x1e6132b54ad5595eull,}, + }, + { + {0xe5b541097726667dull,0x5583dfb4ade471adull,0x1840bff44a2faef2ull,0x093c23f8028fe3b9ull,}, + {0xe1e3347370f6e6c7ull,0x8dd7352c4dcc2a17ull,0x3cade218210f9e29ull,0x190ff57eac6e8b87ull,}, + {0x34905e72c173fdc3ull,0x59f8c6f4373c834eull,0x1bd9feabed806c99ull,0x1f209a7935a8ba38ull,}, + {0xe44f080023c83b49ull,0xfd2006276058693cull,0x44b43b6e462a32cbull,0x0942a0ed8e4657ebull,}, + {0xf7e53796340fd772ull,0xf8219ede4152370full,0x548b9b002c19940cull,0x1d0aaff93f50f52full,}, + {0xb5987eb545462ddaull,0xe0f29867116336edull,0xcc75a11c3ff8374aull,0x144d0b8fda0a44a9ull,}, + }, + { + {0x676408d2ff1a7593ull,0xc96a8077d911776full,0x9efff30500904c63ull,0x100a6093df2ae343ull,}, + {0xf1f92502b846cf30ull,0x57888806036aec6cull,0x310ceb0b04caaa7cull,0x1192819a3058307bull,}, + {0xbbf882b39fec7883ull,0x4079d241f7e6e0efull,0xb3090a69b3c7261full,0x16440a02d7fb5d2dull,}, + {0x70e9c8a88422df45ull,0x48fa15635ca49bd9ull,0x0430c461bfb96d16ull,0x0a29a4007c99f6d1ull,}, + {0x643a2bdb308a297cull,0xe4a5bca158e65ff6ull,0xc8dd1579abdeb9e5ull,0x1ee4a94b3d6c775cull,}, + {0xc085b2622b5c4480ull,0x8c69048c5fcded96ull,0x418ba7bd3260d85dull,0x0b22158bb6c29f9eull,}, + }, + { + {0xf661abe667e83f01ull,0x41068a7e95fd10c0ull,0xc9c4cc186cb3eb72ull,0x1a95a93a30592461ull,}, + 
{0x78dfc65c7280895eull,0xb9f1514b98add459ull,0xc7d713fd92025a11ull,0x0dbe6c1ceabcf73full,}, + {0xe35368a946428244ull,0x990da5e2783a2762ull,0x686b61b7775fb02cull,0x1a79e39b78922172ull,}, + {0xbf8ca28c8d95600full,0x0f56487a909e51cbull,0xfa1da11e3018a2faull,0x07a32571b231773cull,}, + {0x46c84d812bce56f5ull,0x84aa8d8bfe2b498cull,0x699ad1f34e22d74cull,0x0ad743bd99c458dbull,}, + {0xa8d16c7e09aa59b0ull,0x59ba8cbe75f31d51ull,0x5c68705d7838ff4eull,0x1c863feb5090e87eull,}, + }, + { + {0x86af66313ed193baull,0xa0902147163778b5ull,0xa101fcdc6b2d6191ull,0x12fbff4713e6eb10ull,}, + {0x9e1abdaf6e329c66ull,0xd8de2fb4db8e7554ull,0xb4374e1e93a0171bull,0x0ba2ecd00749208full,}, + {0x0cad8f57c02ce090ull,0xcac04eddadd338ecull,0x7ee5c235934f9918ull,0x24db5a9b0ad7ed64ull,}, + {0x46288ad8e01c5063ull,0x4b4c58654226c44aull,0xc4974aaf56ae42dfull,0x173e64cdd5661536ull,}, + {0x58b3450781e7e080ull,0x14ab3a25a5e64bbcull,0x3f9f91743276d2f5ull,0x0e101d0b89b81cdcull,}, + {0xa6bca5fbe99b2b7full,0x5fb8817e670ef40eull,0xb44cbcb05de76cb3ull,0x17110ed4912babb5ull,}, + }, + { + {0x6745e77f4e05d8edull,0xed278e7875ebb5fdull,0x3662f60864a8ccd2ull,0x028104ffc0a31868ull,}, + {0x740b76d64f25c9f0ull,0xb519a415132160e7ull,0x550a38ed829c5f68ull,0x04ea27d6deefcfabull,}, + {0x32d82ea897185651ull,0x04a8f5b63a90573aull,0x2c88fdfba241b62full,0x0285780fe0b77687ull,}, + {0xfb6ebce4f4b20f13ull,0x8ce24ff3dad1a3c7ull,0x716f93b316af50c2ull,0x0a09e678713447efull,}, + {0x6868a19728642ca6ull,0x4be5579c08e0a30cull,0xbd630b8f9c3d1552ull,0x0f277cf26c8e60f2ull,}, + {0x1a105d54bc290b18ull,0xa7e1a7c716529370ull,0x6e5a6c5b44350fd0ull,0x1fd2ae638488fccbull,}, + }, +}; +#endif diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core b/vendor/github.com/dexon-foundation/dexon-consensus-core deleted file mode 120000 index 2955f486a..000000000 --- a/vendor/github.com/dexon-foundation/dexon-consensus-core +++ /dev/null @@ -1 +0,0 @@ -../../../../../dexon-foundation/dexon-consensus-core
\ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/event.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/event.go new file mode 100644 index 000000000..dab34a5f6 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/event.go @@ -0,0 +1,102 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package common + +import ( + "container/heap" + "sync" + "time" +) + +type timeEventFn func(time.Time) + +type timeEvent struct { + t time.Time + fn timeEventFn +} + +// timeEvents implements a Min-Heap structure. +type timeEvents []timeEvent + +func (h timeEvents) Len() int { return len(h) } +func (h timeEvents) Less(i, j int) bool { return h[i].t.Before(h[j].t) } +func (h timeEvents) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h *timeEvents) Push(x interface{}) { + *h = append(*h, x.(timeEvent)) +} +func (h *timeEvents) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +// Event implements the Observer pattern. +type Event struct { + timeEvents timeEvents + timeEventsLock sync.Mutex +} + +// NewEvent creates a new event instance. +func NewEvent() *Event { + te := timeEvents{} + heap.Init(&te) + return &Event{ + timeEvents: te, + } +} + +// RegisterTime to get notified on and after specific time. +func (e *Event) RegisterTime(t time.Time, fn timeEventFn) { + e.timeEventsLock.Lock() + defer e.timeEventsLock.Unlock() + heap.Push(&e.timeEvents, timeEvent{ + t: t, + fn: fn, + }) +} + +// NotifyTime and trigger function callback. +func (e *Event) NotifyTime(t time.Time) { + fns := func() (fns []timeEventFn) { + e.timeEventsLock.Lock() + defer e.timeEventsLock.Unlock() + if len(e.timeEvents) == 0 { + return + } + for !t.Before(e.timeEvents[0].t) { + te := heap.Pop(&e.timeEvents).(timeEvent) + fns = append(fns, te.fn) + if len(e.timeEvents) == 0 { + return + } + } + return + }() + for _, fn := range fns { + fn(t) + } +} + +// Reset clears all pending event +func (e *Event) Reset() { + e.timeEventsLock.Lock() + defer e.timeEventsLock.Unlock() + e.timeEvents = timeEvents{} +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/logger.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/logger.go new file mode 100644 index 000000000..9dc5d231c --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/logger.go @@ -0,0 +1,87 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. 
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package common
+
+import "log"
+
+// Logger defines the way to receive logs from a Consensus instance.
+// NOTE: parameters in 'ctx' should be paired as key-value mappings. For example,
+// to log an error with a message:
+// logger.Error("some message", "error", err)
+// which is similar to loggers with context:
+// logger.Error("some message", map[string]interface{}{
+// "error": err,
+// })
+type Logger interface {
+ // Debug logs debug level logs.
+ Debug(msg string, ctx ...interface{})
+ Info(msg string, ctx ...interface{})
+ Warn(msg string, ctx ...interface{})
+ Error(msg string, ctx ...interface{})
+}
+
+// NullLogger logs nothing.
+type NullLogger struct{}
+
+// Debug implements Logger interface.
+func (logger *NullLogger) Debug(msg string, ctx ...interface{}) {
+}
+
+// Info implements Logger interface.
+func (logger *NullLogger) Info(msg string, ctx ...interface{}) {
+}
+
+// Warn implements Logger interface.
+func (logger *NullLogger) Warn(msg string, ctx ...interface{}) {
+}
+
+// Error implements Logger interface.
+func (logger *NullLogger) Error(msg string, ctx ...interface{}) {
+}
+
+// SimpleLogger logs everything.
+type SimpleLogger struct{}
+
+// composeVargs combines msg and ctx into a single slice that can be passed to
+// log.Println.
+func composeVargs(msg string, ctxs []interface{}) []interface{} {
+ args := []interface{}{msg}
+ for _, c := range ctxs {
+ args = append(args, c)
+ }
+ return args
+}
+
+// Debug implements Logger interface.
+func (logger *SimpleLogger) Debug(msg string, ctx ...interface{}) {
+ log.Println(composeVargs(msg, ctx)...)
+}
+
+// Info implements Logger interface.
+func (logger *SimpleLogger) Info(msg string, ctx ...interface{}) {
+ log.Println(composeVargs(msg, ctx)...)
+}
+
+// Warn implements Logger interface.
+func (logger *SimpleLogger) Warn(msg string, ctx ...interface{}) {
+ log.Println(composeVargs(msg, ctx)...)
+}
+
+// Error implements Logger interface.
+func (logger *SimpleLogger) Error(msg string, ctx ...interface{}) {
+ log.Println(composeVargs(msg, ctx)...)
+}
 diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/types.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/types.go new file mode 100644 index 000000000..f51a51953 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/types.go @@ -0,0 +1,107 @@ +// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package common
+
+import (
+ "bytes"
+ "encoding/hex"
+ "sort"
+ "time"
+)
+
+const (
+ // HashLength is the length of a hash in DEXON.
+ HashLength = 32
+)
+
+// Hash is the basic hash type in DEXON.
+type Hash [HashLength]byte
+
+func (h Hash) String() string {
+ return hex.EncodeToString([]byte(h[:]))
+}
+
+// Bytes returns the hash as a slice of bytes.
+func (h Hash) Bytes() []byte {
+ return h[:]
+}
+
+// Equal compares if two hashes are the same.
+func (h Hash) Equal(hp Hash) bool {
+ return h == hp
+}
+
+// Less reports whether the current hash is less than the given one.
+func (h Hash) Less(hp Hash) bool {
+ return bytes.Compare(h[:], hp[:]) < 0
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (h Hash) MarshalText() ([]byte, error) {
+ result := make([]byte, hex.EncodedLen(HashLength))
+ hex.Encode(result, h[:])
+ return result, nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (h *Hash) UnmarshalText(text []byte) error {
+ _, err := hex.Decode(h[:], text)
+ return err
+}
+
+// Hashes is for sorting hashes.
+type Hashes []Hash
+
+func (hs Hashes) Len() int { return len(hs) }
+func (hs Hashes) Less(i, j int) bool { return hs[i].Less(hs[j]) }
+func (hs Hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] }
+
+// SortedHashes is a slice of hashes sorted in ascending order.
+type SortedHashes Hashes
+
+// NewSortedHashes converts a slice of hashes to a sorted one. It's a
+// firewall to prevent us from assigning unsorted hashes to a variable
+// declared as SortedHashes directly.
+func NewSortedHashes(hs Hashes) SortedHashes {
+ sort.Sort(hs)
+ return SortedHashes(hs)
+}
+
+// ByTime implements sort.Interface for time.Time.
+type ByTime []time.Time + +func (t ByTime) Len() int { return len(t) } +func (t ByTime) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t ByTime) Less(i, j int) bool { return t[i].Before(t[j]) } diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/utils.go new file mode 100644 index 000000000..7e89c059d --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/utils.go @@ -0,0 +1,14 @@ +package common + +import ( + "math/rand" +) + +// NewRandomHash returns a random Hash-like value. +func NewRandomHash() Hash { + x := Hash{} + for i := 0; i < HashLength; i++ { + x[i] = byte(rand.Int() % 256) + } + return x +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement-state.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement-state.go new file mode 100644 index 000000000..56c6c274d --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement-state.go @@ -0,0 +1,152 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "math" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Errors for agreement state module. +var ( + ErrNoEnoughVoteInPrepareState = fmt.Errorf("no enough vote in prepare state") + ErrNoEnoughVoteInAckState = fmt.Errorf("no enough vote in ack state") +) + +// agreementStateType is the state of agreement +type agreementStateType int + +// agreementStateType enum. 
+const ( + stateInitial agreementStateType = iota + statePreCommit + stateCommit + stateForward +) + +var nullBlockHash = common.Hash{} +var skipBlockHash common.Hash + +func init() { + for idx := range skipBlockHash { + skipBlockHash[idx] = 0xff + } +} + +type agreementState interface { + state() agreementStateType + nextState() (agreementState, error) + clocks() int +} + +//----- InitialState ----- +type initialState struct { + a *agreementData +} + +func newInitialState(a *agreementData) *initialState { + return &initialState{a: a} +} + +func (s *initialState) state() agreementStateType { return stateInitial } +func (s *initialState) clocks() int { return 0 } +func (s *initialState) nextState() (agreementState, error) { + s.a.lock.Lock() + defer s.a.lock.Unlock() + hash := s.a.recv.ProposeBlock() + s.a.recv.ProposeVote(&types.Vote{ + Type: types.VoteInit, + BlockHash: hash, + Period: s.a.period, + }) + return newPreCommitState(s.a), nil +} + +//----- PreCommitState ----- +type preCommitState struct { + a *agreementData +} + +func newPreCommitState(a *agreementData) *preCommitState { + return &preCommitState{a: a} +} + +func (s *preCommitState) state() agreementStateType { return statePreCommit } +func (s *preCommitState) clocks() int { return 2 } +func (s *preCommitState) nextState() (agreementState, error) { + s.a.lock.RLock() + defer s.a.lock.RUnlock() + hash := s.a.lockValue + if hash == nullBlockHash { + hash = s.a.leader.leaderBlockHash() + } + s.a.recv.ProposeVote(&types.Vote{ + Type: types.VotePreCom, + BlockHash: hash, + Period: s.a.period, + }) + return newCommitState(s.a), nil +} + +//----- CommitState ----- +type commitState struct { + a *agreementData +} + +func newCommitState(a *agreementData) *commitState { + return &commitState{a: a} +} + +func (s *commitState) state() agreementStateType { return stateCommit } +func (s *commitState) clocks() int { return 2 } +func (s *commitState) nextState() (agreementState, error) { + hash, ok := s.a.countVote(s.a.period, types.VotePreCom) + s.a.lock.Lock() + defer s.a.lock.Unlock() + if ok && hash != skipBlockHash { + s.a.lockValue = hash + s.a.lockRound = s.a.period + } else { + hash = skipBlockHash + } + s.a.recv.ProposeVote(&types.Vote{ + Type: types.VoteCom, + BlockHash: hash, + Period: s.a.period, + }) + return newForwardState(s.a), nil +} + +// ----- ForwardState ----- +type forwardState struct { + a *agreementData +} + +func newForwardState(a *agreementData) *forwardState { + return &forwardState{a: a} +} + +func (s *forwardState) state() agreementStateType { return stateForward } +func (s *forwardState) clocks() int { return math.MaxInt32 } + +func (s *forwardState) nextState() (agreementState, error) { + return s, nil +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement.go new file mode 100644 index 000000000..8618b5ff0 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement.go @@ -0,0 +1,430 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Errors for agreement module. +var ( + ErrNotInNotarySet = fmt.Errorf("not in notary set") + ErrIncorrectVoteSignature = fmt.Errorf("incorrect vote signature") +) + +// ErrFork for fork error in agreement. +type ErrFork struct { + nID types.NodeID + old, new common.Hash +} + +func (e *ErrFork) Error() string { + return fmt.Sprintf("fork is found for %s, old %s, new %s", + e.nID.String(), e.old, e.new) +} + +// ErrForkVote for fork vote error in agreement. +type ErrForkVote struct { + nID types.NodeID + old, new *types.Vote +} + +func (e *ErrForkVote) Error() string { + return fmt.Sprintf("fork vote is found for %s, old %s, new %s", + e.nID.String(), e.old, e.new) +} + +func newVoteListMap() []map[types.NodeID]*types.Vote { + listMap := make([]map[types.NodeID]*types.Vote, types.MaxVoteType) + for idx := range listMap { + listMap[idx] = make(map[types.NodeID]*types.Vote) + } + return listMap +} + +// agreementReceiver is the interface receiving agreement event. +type agreementReceiver interface { + ProposeVote(vote *types.Vote) + ProposeBlock() common.Hash + ConfirmBlock(common.Hash, map[types.NodeID]*types.Vote) +} + +type pendingBlock struct { + block *types.Block + receivedTime time.Time +} + +type pendingVote struct { + vote *types.Vote + receivedTime time.Time +} + +// agreementData is the data for agreementState. +type agreementData struct { + recv agreementReceiver + + ID types.NodeID + leader *leaderSelector + lockValue common.Hash + lockRound uint64 + period uint64 + requiredVote int + votes map[uint64][]map[types.NodeID]*types.Vote + lock sync.RWMutex + blocks map[types.NodeID]*types.Block + blocksLock sync.Mutex +} + +// agreement is the agreement protocal describe in the Crypto Shuffle Algorithm. +type agreement struct { + state agreementState + data *agreementData + aID *atomic.Value + notarySet map[types.NodeID]struct{} + hasOutput bool + lock sync.RWMutex + pendingBlock []pendingBlock + pendingVote []pendingVote + candidateBlock map[common.Hash]*types.Block + fastForward chan uint64 + authModule *Authenticator +} + +// newAgreement creates a agreement instance. 
+func newAgreement( + ID types.NodeID, + recv agreementReceiver, + notarySet map[types.NodeID]struct{}, + leader *leaderSelector, + authModule *Authenticator) *agreement { + agreement := &agreement{ + data: &agreementData{ + recv: recv, + ID: ID, + leader: leader, + }, + aID: &atomic.Value{}, + candidateBlock: make(map[common.Hash]*types.Block), + fastForward: make(chan uint64, 1), + authModule: authModule, + } + agreement.stop() + return agreement +} + +// restart the agreement +func (a *agreement) restart( + notarySet map[types.NodeID]struct{}, aID types.Position) { + + func() { + a.lock.Lock() + defer a.lock.Unlock() + a.data.lock.Lock() + defer a.data.lock.Unlock() + a.data.blocksLock.Lock() + defer a.data.blocksLock.Unlock() + a.data.votes = make(map[uint64][]map[types.NodeID]*types.Vote) + a.data.votes[1] = newVoteListMap() + a.data.period = 1 + a.data.blocks = make(map[types.NodeID]*types.Block) + a.data.requiredVote = len(notarySet)/3*2 + 1 + a.data.leader.restart() + a.data.lockValue = nullBlockHash + a.data.lockRound = 1 + a.fastForward = make(chan uint64, 1) + a.hasOutput = false + a.state = newInitialState(a.data) + a.notarySet = notarySet + a.candidateBlock = make(map[common.Hash]*types.Block) + a.aID.Store(aID) + }() + + expireTime := time.Now().Add(-10 * time.Second) + replayBlock := make([]*types.Block, 0) + func() { + a.lock.Lock() + defer a.lock.Unlock() + newPendingBlock := make([]pendingBlock, 0) + for _, pending := range a.pendingBlock { + if pending.block.Position == aID { + replayBlock = append(replayBlock, pending.block) + } else if pending.receivedTime.After(expireTime) { + newPendingBlock = append(newPendingBlock, pending) + } + } + a.pendingBlock = newPendingBlock + }() + + replayVote := make([]*types.Vote, 0) + func() { + a.lock.Lock() + defer a.lock.Unlock() + newPendingVote := make([]pendingVote, 0) + for _, pending := range a.pendingVote { + if pending.vote.Position == aID { + replayVote = append(replayVote, pending.vote) + } else if pending.receivedTime.After(expireTime) { + newPendingVote = append(newPendingVote, pending) + } + } + a.pendingVote = newPendingVote + }() + + for _, block := range replayBlock { + a.processBlock(block) + } + + for _, vote := range replayVote { + a.processVote(vote) + } +} + +func (a *agreement) stop() { + a.restart(make(map[types.NodeID]struct{}), types.Position{ + ChainID: math.MaxUint32, + }) +} + +// clocks returns how many time this state is required. +func (a *agreement) clocks() int { + return a.state.clocks() +} + +// agreementID returns the current agreementID. +func (a *agreement) agreementID() types.Position { + return a.aID.Load().(types.Position) +} + +// nextState is called at the specific clock time. 
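restart() above derives the quorum size as len(notarySet)/3*2 + 1, the usual Byzantine threshold: with n = 3f+1 notaries, the integer division yields 2f+1 matching votes. A minimal, self-contained sketch of that arithmetic (illustrative only, not part of the vendored file):

package main

import "fmt"

// requiredVotes mirrors the quorum rule used in restart().
func requiredVotes(notarySetSize int) int {
	return notarySetSize/3*2 + 1
}

func main() {
	for _, n := range []int{4, 7, 10, 13} {
		// e.g. 13 notaries tolerate 4 faults and need 9 matching votes.
		fmt.Printf("notaries=%d required=%d\n", n, requiredVotes(n))
	}
}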
+func (a *agreement) nextState() (err error) { + a.state, err = a.state.nextState() + return +} + +func (a *agreement) sanityCheck(vote *types.Vote) error { + if exist := func() bool { + a.lock.RLock() + defer a.lock.RUnlock() + _, exist := a.notarySet[vote.ProposerID] + return exist + }(); !exist { + return ErrNotInNotarySet + } + ok, err := verifyVoteSignature(vote) + if err != nil { + return err + } + if !ok { + return ErrIncorrectVoteSignature + } + return nil +} + +func (a *agreement) checkForkVote(vote *types.Vote) error { + if err := func() error { + a.data.lock.RLock() + defer a.data.lock.RUnlock() + if votes, exist := a.data.votes[vote.Period]; exist { + if oldVote, exist := votes[vote.Type][vote.ProposerID]; exist { + if vote.BlockHash != oldVote.BlockHash { + return &ErrForkVote{vote.ProposerID, oldVote, vote} + } + } + } + return nil + }(); err != nil { + return err + } + return nil +} + +// prepareVote prepares a vote. +func (a *agreement) prepareVote(vote *types.Vote) (err error) { + vote.Position = a.agreementID() + err = a.authModule.SignVote(vote) + return +} + +// processVote is the entry point for processing Vote. +func (a *agreement) processVote(vote *types.Vote) error { + if err := a.sanityCheck(vote); err != nil { + return err + } + if vote.Position != a.agreementID() { + a.lock.Lock() + defer a.lock.Unlock() + a.pendingVote = append(a.pendingVote, pendingVote{ + vote: vote, + receivedTime: time.Now().UTC(), + }) + return nil + } + if err := a.checkForkVote(vote); err != nil { + return err + } + + a.data.lock.Lock() + defer a.data.lock.Unlock() + if _, exist := a.data.votes[vote.Period]; !exist { + a.data.votes[vote.Period] = newVoteListMap() + } + a.data.votes[vote.Period][vote.Type][vote.ProposerID] = vote + if !a.hasOutput && vote.Type == types.VoteCom { + if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok && + hash != skipBlockHash { + a.hasOutput = true + a.data.recv.ConfirmBlock(hash, + a.data.votes[vote.Period][types.VoteCom]) + return nil + } + } else if a.hasOutput { + return nil + } + + // Check if the agreement requires fast-forwarding. + if vote.Type == types.VotePreCom { + if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok && + hash != skipBlockHash { + // Condition 1. + if a.data.period >= vote.Period && vote.Period > a.data.lockRound && + vote.BlockHash != a.data.lockValue { + a.data.lockValue = hash + a.data.lockRound = vote.Period + a.fastForward <- a.data.period + 1 + return nil + } + // Condition 2. + if vote.Period > a.data.period { + a.data.lockValue = hash + a.data.lockRound = vote.Period + a.fastForward <- vote.Period + return nil + } + } + } + // Condition 3. + if vote.Type == types.VoteCom && vote.Period >= a.data.period && + len(a.data.votes[vote.Period][types.VoteCom]) >= a.data.requiredVote { + a.fastForward <- vote.Period + 1 + return nil + } + return nil +} + +func (a *agreement) done() <-chan struct{} { + ch := make(chan struct{}, 1) + if a.hasOutput { + ch <- struct{}{} + } else { + select { + case period := <-a.fastForward: + if period <= a.data.period { + break + } + a.data.setPeriod(period) + a.state = newPreCommitState(a.data) + ch <- struct{}{} + default: + } + } + return ch +} + +// processBlock is the entry point for processing Block. 
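checkForkVote above flags equivocation: two votes from the same proposer, for the same period and vote type, that name different block hashes. A small stand-alone sketch of that rule, using a hypothetical vote type that only mirrors the fields the check consults:

package main

import "fmt"

type vote struct {
	proposer  string
	period    uint64
	voteType  int
	blockHash string
}

// isForkVote reports whether newV equivocates against oldV: identical
// proposer, period and vote type, but a conflicting block hash.
func isForkVote(oldV, newV vote) bool {
	return oldV.proposer == newV.proposer &&
		oldV.period == newV.period &&
		oldV.voteType == newV.voteType &&
		oldV.blockHash != newV.blockHash
}

func main() {
	a := vote{proposer: "node-1", period: 3, voteType: 1, blockHash: "0xaaa"}
	b := vote{proposer: "node-1", period: 3, voteType: 1, blockHash: "0xbbb"}
	fmt.Println(isForkVote(a, b)) // true: conflicting hashes for the same slot
}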
+func (a *agreement) processBlock(block *types.Block) error { + a.data.blocksLock.Lock() + defer a.data.blocksLock.Unlock() + if block.Position != a.agreementID() { + a.pendingBlock = append(a.pendingBlock, pendingBlock{ + block: block, + receivedTime: time.Now().UTC(), + }) + return nil + } + if b, exist := a.data.blocks[block.ProposerID]; exist { + if b.Hash != block.Hash { + return &ErrFork{block.ProposerID, b.Hash, block.Hash} + } + return nil + } + if err := a.data.leader.processBlock(block); err != nil { + return err + } + a.data.blocks[block.ProposerID] = block + a.addCandidateBlock(block) + return nil +} + +func (a *agreement) addCandidateBlock(block *types.Block) { + a.lock.Lock() + defer a.lock.Unlock() + a.candidateBlock[block.Hash] = block +} + +func (a *agreement) findCandidateBlock(hash common.Hash) (*types.Block, bool) { + a.lock.RLock() + defer a.lock.RUnlock() + b, e := a.candidateBlock[hash] + return b, e +} + +func (a *agreementData) countVote(period uint64, voteType types.VoteType) ( + blockHash common.Hash, ok bool) { + a.lock.RLock() + defer a.lock.RUnlock() + return a.countVoteNoLock(period, voteType) +} + +func (a *agreementData) countVoteNoLock( + period uint64, voteType types.VoteType) (blockHash common.Hash, ok bool) { + votes, exist := a.votes[period] + if !exist { + return + } + candidate := make(map[common.Hash]int) + for _, vote := range votes[voteType] { + if _, exist := candidate[vote.BlockHash]; !exist { + candidate[vote.BlockHash] = 0 + } + candidate[vote.BlockHash]++ + } + for candidateHash, votes := range candidate { + if votes >= a.requiredVote { + blockHash = candidateHash + ok = true + return + } + } + return +} + +func (a *agreementData) setPeriod(period uint64) { + for i := a.period + 1; i <= period; i++ { + if _, exist := a.votes[i]; !exist { + a.votes[i] = newVoteListMap() + } + } + a.period = period +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/authenticator.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/authenticator.go new file mode 100644 index 000000000..5415f967c --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/authenticator.go @@ -0,0 +1,141 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Authenticator verify data owner. +type Authenticator struct { + prvKey crypto.PrivateKey + pubKey crypto.PublicKey + proposerID types.NodeID +} + +// NewAuthenticator constructs an Authenticator instance. 
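The Authenticator defined above follows a fill-in-proposer, hash, sign, then recover-and-verify flow. As a loose analogy only, here is a self-contained round trip using the standard library's ed25519 package; the project's own crypto package is not fully shown in this diff, so none of its calls are assumed here:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	payload := []byte("block payload")
	hash := sha256.Sum256(payload) // stands in for hashBlock / hashVote
	sig := ed25519.Sign(priv, hash[:])
	// Verification mirrors the VerifyBlock pattern: recompute the hash,
	// then check the signature against the claimed proposer's key.
	fmt.Println("signature valid:", ed25519.Verify(pub, hash[:], sig))
}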
+func NewAuthenticator(prvKey crypto.PrivateKey) (auth *Authenticator) { + auth = &Authenticator{ + prvKey: prvKey, + pubKey: prvKey.PublicKey(), + } + auth.proposerID = types.NewNodeID(auth.pubKey) + return +} + +// SignBlock signs a types.Block. +func (au *Authenticator) SignBlock(b *types.Block) (err error) { + b.ProposerID = au.proposerID + if b.Hash, err = hashBlock(b); err != nil { + return + } + if b.Signature, err = au.prvKey.Sign(b.Hash); err != nil { + return + } + return +} + +// SignVote signs a types.Vote. +func (au *Authenticator) SignVote(v *types.Vote) (err error) { + v.ProposerID = au.proposerID + v.Signature, err = au.prvKey.Sign(hashVote(v)) + return +} + +// SignCRS signs CRS signature of types.Block. +func (au *Authenticator) SignCRS(b *types.Block, crs common.Hash) (err error) { + if b.ProposerID != au.proposerID { + err = ErrInvalidProposerID + return + } + b.CRSSignature, err = au.prvKey.Sign(hashCRS(b, crs)) + return +} + +// SignDKGComplaint signs a DKG complaint. +func (au *Authenticator) SignDKGComplaint( + complaint *types.DKGComplaint) (err error) { + complaint.ProposerID = au.proposerID + complaint.Signature, err = au.prvKey.Sign(hashDKGComplaint(complaint)) + return +} + +// SignDKGMasterPublicKey signs a DKG master public key. +func (au *Authenticator) SignDKGMasterPublicKey( + mpk *types.DKGMasterPublicKey) (err error) { + mpk.ProposerID = au.proposerID + mpk.Signature, err = au.prvKey.Sign(hashDKGMasterPublicKey(mpk)) + return +} + +// SignDKGPrivateShare signs a DKG private share. +func (au *Authenticator) SignDKGPrivateShare( + prvShare *types.DKGPrivateShare) (err error) { + prvShare.ProposerID = au.proposerID + prvShare.Signature, err = au.prvKey.Sign(hashDKGPrivateShare(prvShare)) + return +} + +// SignDKGPartialSignature signs a DKG partial signature. +func (au *Authenticator) SignDKGPartialSignature( + pSig *types.DKGPartialSignature) (err error) { + pSig.ProposerID = au.proposerID + pSig.Signature, err = au.prvKey.Sign(hashDKGPartialSignature(pSig)) + return +} + +// SignDKGFinalize signs a DKG finalize message. +func (au *Authenticator) SignDKGFinalize( + final *types.DKGFinalize) (err error) { + final.ProposerID = au.proposerID + final.Signature, err = au.prvKey.Sign(hashDKGFinalize(final)) + return +} + +// VerifyBlock verifies the signature of types.Block. +func (au *Authenticator) VerifyBlock(b *types.Block) (err error) { + hash, err := hashBlock(b) + if err != nil { + return + } + if hash != b.Hash { + err = ErrIncorrectHash + return + } + pubKey, err := crypto.SigToPub(b.Hash, b.Signature) + if err != nil { + return + } + if !b.ProposerID.Equal(types.NewNodeID(pubKey)) { + err = ErrIncorrectSignature + return + } + return +} + +// VerifyVote verifies the signature of types.Vote. +func (au *Authenticator) VerifyVote(v *types.Vote) (bool, error) { + return verifyVoteSignature(v) +} + +// VerifyCRS verifies the CRS signature of types.Block. +func (au *Authenticator) VerifyCRS(b *types.Block, crs common.Hash) (bool, error) { + return verifyCRSSignature(b, crs) +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/interfaces.go new file mode 100644 index 000000000..fd176bc11 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/interfaces.go @@ -0,0 +1,70 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. 
+// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package blockdb + +import ( + "errors" + "fmt" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +var ( + // ErrBlockExists is the error when block eixsts. + ErrBlockExists = errors.New("block exists") + // ErrBlockDoesNotExist is the error when block does not eixst. + ErrBlockDoesNotExist = errors.New("block does not exist") + // ErrIterationFinished is the error to check if the iteration is finished. + ErrIterationFinished = errors.New("iteration finished") + // ErrEmptyPath is the error when the required path is empty. + ErrEmptyPath = fmt.Errorf("empty path") + // ErrClosed is the error when using DB after it's closed. + ErrClosed = fmt.Errorf("db closed") + // ErrNotImplemented is the error that some interface is not implemented. + ErrNotImplemented = fmt.Errorf("not implemented") +) + +// BlockDatabase is the interface for a BlockDatabase. +type BlockDatabase interface { + Reader + Writer + + // Close allows database implementation able to + // release resource when finishing. + Close() error +} + +// Reader defines the interface for reading blocks into DB. +type Reader interface { + Has(hash common.Hash) bool + Get(hash common.Hash) (types.Block, error) + GetAll() (BlockIterator, error) +} + +// Writer defines the interface for writing blocks into DB. +type Writer interface { + Update(block types.Block) error + Put(block types.Block) error +} + +// BlockIterator defines an iterator on blocks hold +// in a DB. +type BlockIterator interface { + Next() (types.Block, error) +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/level-db.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/level-db.go new file mode 100644 index 000000000..79099c0f1 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/level-db.go @@ -0,0 +1,127 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. 
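GetAll hands back a BlockIterator whose Next returns ErrIterationFinished once the sequence is exhausted. A small usage sketch against the interfaces and the memory-backed implementation introduced in this diff (the drainBlocks helper is illustrative, not part of the package):

package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
	"github.com/dexon-foundation/dexon-consensus-core/core/types"
)

// drainBlocks keeps calling Next until ErrIterationFinished, collecting
// every block the iterator yields.
func drainBlocks(it blockdb.BlockIterator) ([]types.Block, error) {
	blocks := []types.Block{}
	for {
		b, err := it.Next()
		if err == blockdb.ErrIterationFinished {
			return blocks, nil // normal end of iteration
		}
		if err != nil {
			return nil, err
		}
		blocks = append(blocks, b)
	}
}

func main() {
	db, err := blockdb.NewMemBackedBlockDB()
	if err != nil {
		panic(err)
	}
	it, err := db.GetAll()
	if err != nil {
		panic(err)
	}
	blocks, err := drainBlocks(it)
	if err != nil {
		panic(err)
	}
	fmt.Println("blocks in db:", len(blocks))
}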
+ +package blockdb + +import ( + "encoding/json" + + "github.com/syndtr/goleveldb/leveldb" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// LevelDBBackedBlockDB is a leveldb backed BlockDB implementation. +type LevelDBBackedBlockDB struct { + db *leveldb.DB +} + +// NewLevelDBBackedBlockDB initialize a leveldb-backed block database. +func NewLevelDBBackedBlockDB( + path string) (lvl *LevelDBBackedBlockDB, err error) { + + db, err := leveldb.OpenFile(path, nil) + if err != nil { + return + } + lvl = &LevelDBBackedBlockDB{db: db} + return +} + +// Close implement Closer interface, which would release allocated resource. +func (lvl *LevelDBBackedBlockDB) Close() error { + return lvl.db.Close() +} + +// Has implements the Reader.Has method. +func (lvl *LevelDBBackedBlockDB) Has(hash common.Hash) bool { + exists, err := lvl.db.Has([]byte(hash[:]), nil) + if err != nil { + // TODO(missionliao): Modify the interface to return error. + panic(err) + } + return exists +} + +// Get implements the Reader.Get method. +func (lvl *LevelDBBackedBlockDB) Get( + hash common.Hash) (block types.Block, err error) { + + queried, err := lvl.db.Get([]byte(hash[:]), nil) + if err != nil { + if err == leveldb.ErrNotFound { + err = ErrBlockDoesNotExist + } + return + } + err = json.Unmarshal(queried, &block) + if err != nil { + return + } + return +} + +// Update implements the Writer.Update method. +func (lvl *LevelDBBackedBlockDB) Update(block types.Block) (err error) { + // NOTE: we didn't handle changes of block hash (and it + // should not happen). + marshaled, err := json.Marshal(&block) + if err != nil { + return + } + + if !lvl.Has(block.Hash) { + err = ErrBlockDoesNotExist + return + } + err = lvl.db.Put( + []byte(block.Hash[:]), + marshaled, + nil) + if err != nil { + return + } + return +} + +// Put implements the Writer.Put method. +func (lvl *LevelDBBackedBlockDB) Put(block types.Block) (err error) { + marshaled, err := json.Marshal(&block) + if err != nil { + return + } + if lvl.Has(block.Hash) { + err = ErrBlockExists + return + } + err = lvl.db.Put( + []byte(block.Hash[:]), + marshaled, + nil) + if err != nil { + return + } + return +} + +// GetAll implements Reader.GetAll method, which allows callers +// to retrieve all blocks in DB. +func (lvl *LevelDBBackedBlockDB) GetAll() (BlockIterator, error) { + // TODO (mission): Implement this part via goleveldb's iterator. + return nil, ErrNotImplemented +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/memory.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/memory.go new file mode 100644 index 000000000..eeda47772 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/memory.go @@ -0,0 +1,179 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package blockdb + +import ( + "encoding/json" + "io/ioutil" + "os" + "sync" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +type seqIterator struct { + idx int + db *MemBackedBlockDB +} + +func (seq *seqIterator) Next() (types.Block, error) { + curIdx := seq.idx + seq.idx++ + return seq.db.getByIndex(curIdx) +} + +// MemBackedBlockDB is a memory backed BlockDB implementation. +type MemBackedBlockDB struct { + blocksMutex sync.RWMutex + blockHashSequence common.Hashes + blocksByHash map[common.Hash]*types.Block + persistantFilePath string +} + +// NewMemBackedBlockDB initialize a memory-backed block database. +func NewMemBackedBlockDB(persistantFilePath ...string) (db *MemBackedBlockDB, err error) { + db = &MemBackedBlockDB{ + blockHashSequence: common.Hashes{}, + blocksByHash: make(map[common.Hash]*types.Block), + } + if len(persistantFilePath) == 0 || len(persistantFilePath[0]) == 0 { + return + } + db.persistantFilePath = persistantFilePath[0] + buf, err := ioutil.ReadFile(db.persistantFilePath) + if err != nil { + if !os.IsNotExist(err) { + // Something unexpected happened. + return + } + // It's expected behavior that file doesn't exists, we should not + // report error on it. + err = nil + return + } + + // Init this instance by file content, it's a temporary way + // to export those private field for JSON encoding. + toLoad := struct { + Sequence common.Hashes + ByHash map[common.Hash]*types.Block + }{} + err = json.Unmarshal(buf, &toLoad) + if err != nil { + return + } + db.blockHashSequence = toLoad.Sequence + db.blocksByHash = toLoad.ByHash + return +} + +// Has returns wheter or not the DB has a block identified with the hash. +func (m *MemBackedBlockDB) Has(hash common.Hash) bool { + m.blocksMutex.RLock() + defer m.blocksMutex.RUnlock() + + _, ok := m.blocksByHash[hash] + return ok +} + +// Get returns a block given a hash. +func (m *MemBackedBlockDB) Get(hash common.Hash) (types.Block, error) { + m.blocksMutex.RLock() + defer m.blocksMutex.RUnlock() + + return m.internalGet(hash) +} + +func (m *MemBackedBlockDB) internalGet(hash common.Hash) (types.Block, error) { + b, ok := m.blocksByHash[hash] + if !ok { + return types.Block{}, ErrBlockDoesNotExist + } + return *b, nil +} + +// Put inserts a new block into the database. +func (m *MemBackedBlockDB) Put(block types.Block) error { + if m.Has(block.Hash) { + return ErrBlockExists + } + + m.blocksMutex.Lock() + defer m.blocksMutex.Unlock() + + m.blockHashSequence = append(m.blockHashSequence, block.Hash) + m.blocksByHash[block.Hash] = &block + return nil +} + +// Update updates a block in the database. +func (m *MemBackedBlockDB) Update(block types.Block) error { + m.blocksMutex.Lock() + defer m.blocksMutex.Unlock() + + m.blocksByHash[block.Hash] = &block + return nil +} + +// Close implement Closer interface, which would release allocated resource. +func (m *MemBackedBlockDB) Close() (err error) { + // Save internal state to a pretty-print json file. It's a temporary way + // to dump private file via JSON encoding. 
+ if len(m.persistantFilePath) == 0 { + return + } + + m.blocksMutex.RLock() + defer m.blocksMutex.RUnlock() + + toDump := struct { + Sequence common.Hashes + ByHash map[common.Hash]*types.Block + }{ + Sequence: m.blockHashSequence, + ByHash: m.blocksByHash, + } + + // Dump to JSON with 2-space indent. + buf, err := json.Marshal(&toDump) + if err != nil { + return + } + + err = ioutil.WriteFile(m.persistantFilePath, buf, 0644) + return +} + +func (m *MemBackedBlockDB) getByIndex(idx int) (types.Block, error) { + m.blocksMutex.RLock() + defer m.blocksMutex.RUnlock() + + if idx >= len(m.blockHashSequence) { + return types.Block{}, ErrIterationFinished + } + + hash := m.blockHashSequence[idx] + return m.internalGet(hash) +} + +// GetAll implement Reader.GetAll method, which allows caller +// to retrieve all blocks in DB. +func (m *MemBackedBlockDB) GetAll() (BlockIterator, error) { + return &seqIterator{db: m}, nil +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockpool.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockpool.go new file mode 100644 index 000000000..7441cf92a --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockpool.go @@ -0,0 +1,85 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "container/heap" + + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// blockPool is a slice of heap of blocks, indexed by chainID, +// and the heap is sorted based on heights of blocks. +type blockPool []types.ByPosition + +// newBlockPool constructs a blockPool. +func newBlockPool(chainNum uint32) (pool blockPool) { + pool = make(blockPool, chainNum) + for _, p := range pool { + heap.Init(&p) + } + return +} + +// resize the pool if new chain is added. +func (p *blockPool) resize(num uint32) { + if uint32(len(*p)) < num { + return + } + newPool := make([]types.ByPosition, num) + copy(newPool, *p) + for i := uint32(len(*p)); i < num; i++ { + newChain := types.ByPosition{} + heap.Init(&newChain) + newPool[i] = newChain + } + *p = newPool +} + +// addBlock adds a block into pending set and make sure these +// blocks are sorted by height. +func (p blockPool) addBlock(b *types.Block) { + heap.Push(&p[b.Position.ChainID], b) +} + +// purgeBlocks purge blocks of that chain with less-or-equal height. +// NOTE: we won't check the validity of 'chainID', the caller should +// be sure what he is expecting. +func (p blockPool) purgeBlocks(chainID uint32, height uint64) { + for { + if len(p[chainID]) == 0 || p[chainID][0].Position.Height > height { + break + } + heap.Pop(&p[chainID]) + } +} + +// tip get the blocks with lowest height of the chain if any. 
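blockPool above leans on container/heap so that the lowest block of each chain sits at index 0 and purgeBlocks can pop while the tip's height is at or below the target. A self-contained illustration of that pattern with a hypothetical block type ordered by height (the real pool orders types.Block values through types.ByPosition):

package main

import (
	"container/heap"
	"fmt"
)

type block struct{ height uint64 }

// byHeight is a min-heap of blocks keyed by height.
type byHeight []*block

func (h byHeight) Len() int            { return len(h) }
func (h byHeight) Less(i, j int) bool  { return h[i].height < h[j].height }
func (h byHeight) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *byHeight) Push(x interface{}) { *h = append(*h, x.(*block)) }
func (h *byHeight) Pop() interface{} {
	old := *h
	n := len(old)
	b := old[n-1]
	*h = old[:n-1]
	return b
}

func main() {
	pool := &byHeight{}
	heap.Init(pool)
	for _, h := range []uint64{5, 1, 3, 2, 4} {
		heap.Push(pool, &block{height: h})
	}
	// Purge everything with height <= 3, mirroring purgeBlocks.
	for pool.Len() > 0 && (*pool)[0].height <= 3 {
		heap.Pop(pool)
	}
	fmt.Println("tip height:", (*pool)[0].height) // 4
}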
+func (p blockPool) tip(chainID uint32) *types.Block { + if len(p[chainID]) == 0 { + return nil + } + return p[chainID][0] +} + +// removeTip removes block with lowest height of the specified chain. +func (p blockPool) removeTip(chainID uint32) { + if len(p[chainID]) > 0 { + heap.Pop(&p[chainID]) + } +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/compaction-chain.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/compaction-chain.go new file mode 100644 index 000000000..5b13f7fe1 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/compaction-chain.go @@ -0,0 +1,256 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "container/heap" + "fmt" + "sync" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Errors for compaction chain module. 
+var ( + ErrBlockNotRegistered = fmt.Errorf( + "block not registered") + ErrNotInitiazlied = fmt.Errorf( + "not initialized") +) + +type finalizedBlockHeap = types.ByFinalizationHeight + +type compactionChain struct { + gov Governance + chainUnsynced uint32 + tsigVerifier *TSigVerifierCache + blocks map[common.Hash]*types.Block + pendingBlocks []*types.Block + pendingFinalizedBlocks *finalizedBlockHeap + lock sync.RWMutex + prevBlock *types.Block +} + +func newCompactionChain(gov Governance) *compactionChain { + pendingFinalizedBlocks := &finalizedBlockHeap{} + heap.Init(pendingFinalizedBlocks) + return &compactionChain{ + gov: gov, + tsigVerifier: NewTSigVerifierCache(gov, 7), + blocks: make(map[common.Hash]*types.Block), + pendingFinalizedBlocks: pendingFinalizedBlocks, + } +} + +func (cc *compactionChain) init(initBlock *types.Block) { + cc.lock.Lock() + defer cc.lock.Unlock() + cc.prevBlock = initBlock + cc.pendingBlocks = []*types.Block{} + if initBlock.Finalization.Height == 0 { + cc.chainUnsynced = cc.gov.Configuration(uint64(0)).NumChains + cc.pendingBlocks = append(cc.pendingBlocks, initBlock) + } +} + +func (cc *compactionChain) registerBlock(block *types.Block) { + if cc.blockRegistered(block.Hash) { + return + } + cc.lock.Lock() + defer cc.lock.Unlock() + cc.blocks[block.Hash] = block +} + +func (cc *compactionChain) blockRegistered(hash common.Hash) (exist bool) { + cc.lock.RLock() + defer cc.lock.RUnlock() + _, exist = cc.blocks[hash] + return +} + +func (cc *compactionChain) processBlock(block *types.Block) error { + prevBlock := cc.lastBlock() + if prevBlock == nil { + return ErrNotInitiazlied + } + cc.lock.Lock() + defer cc.lock.Unlock() + if prevBlock.Finalization.Height == 0 && block.Position.Height == 0 { + cc.chainUnsynced-- + } + cc.pendingBlocks = append(cc.pendingBlocks, block) + return nil +} + +func (cc *compactionChain) extractBlocks() []*types.Block { + prevBlock := cc.lastBlock() + + // Check if we're synced. + if !func() bool { + cc.lock.RLock() + defer cc.lock.RUnlock() + if len(cc.pendingBlocks) == 0 { + return false + } + // Finalization.Height == 0 is syncing from bootstrap. + if prevBlock.Finalization.Height == 0 { + return cc.chainUnsynced == 0 + } + if prevBlock.Hash != cc.pendingBlocks[0].Hash { + return false + } + return true + }() { + return []*types.Block{} + } + deliveringBlocks := make([]*types.Block, 0) + cc.lock.Lock() + defer cc.lock.Unlock() + // cc.pendingBlocks[0] will not be popped and will equal to cc.prevBlock. 
+ for len(cc.pendingBlocks) > 1 && + (len(cc.pendingBlocks[1].Finalization.Randomness) != 0 || + cc.pendingBlocks[1].Position.Round == 0) { + delete(cc.blocks, cc.pendingBlocks[0].Hash) + cc.pendingBlocks = cc.pendingBlocks[1:] + + block := cc.pendingBlocks[0] + block.Finalization.Height = prevBlock.Finalization.Height + 1 + deliveringBlocks = append(deliveringBlocks, block) + prevBlock = block + } + + cc.prevBlock = prevBlock + + return deliveringBlocks +} + +func (cc *compactionChain) processFinalizedBlock(block *types.Block) { + if block.Finalization.Height <= cc.lastBlock().Finalization.Height { + return + } + + cc.lock.Lock() + defer cc.lock.Unlock() + heap.Push(cc.pendingFinalizedBlocks, block) + + return +} + +func (cc *compactionChain) extractFinalizedBlocks() []*types.Block { + prevBlock := cc.lastBlock() + + blocks := func() []*types.Block { + cc.lock.Lock() + defer cc.lock.Unlock() + blocks := []*types.Block{} + prevHeight := prevBlock.Finalization.Height + for cc.pendingFinalizedBlocks.Len() != 0 { + tip := (*cc.pendingFinalizedBlocks)[0] + // Pop blocks that are already confirmed. + if tip.Finalization.Height <= prevBlock.Finalization.Height { + heap.Pop(cc.pendingFinalizedBlocks) + continue + } + // Since we haven't verified the finalized block, + // it is possible to be forked. + if tip.Finalization.Height == prevHeight || + tip.Finalization.Height == prevHeight+1 { + prevHeight = tip.Finalization.Height + blocks = append(blocks, tip) + heap.Pop(cc.pendingFinalizedBlocks) + } else { + break + } + } + return blocks + }() + toPending := []*types.Block{} + confirmed := []*types.Block{} + for _, b := range blocks { + if b.Hash == prevBlock.Hash && + b.Finalization.Height == prevBlock.Finalization.Height { + continue + } + round := b.Position.Round + v, ok, err := cc.tsigVerifier.UpdateAndGet(round) + if err != nil { + continue + } + if !ok { + toPending = append(toPending, b) + continue + } + if ok := v.VerifySignature(b.Hash, crypto.Signature{ + Type: "bls", + Signature: b.Finalization.Randomness}); !ok { + continue + } + // Fork resolution: choose block with smaller hash. + if prevBlock.Finalization.Height == + b.Finalization.Height { + //TODO(jimmy-dexon): remove this panic after test. + if true { + // workaround to `go vet` error + panic(fmt.Errorf( + "forked finalized block %s,%s", prevBlock.Hash, b.Hash)) + } + if b.Hash.Less(prevBlock.Hash) { + confirmed = confirmed[:len(confirmed)-1] + } else { + continue + } + } + if b.Finalization.Height-prevBlock.Finalization.Height > 1 { + toPending = append(toPending, b) + continue + } + confirmed = append(confirmed, b) + prevBlock = b + } + cc.lock.Lock() + defer cc.lock.Unlock() + if len(confirmed) != 0 && cc.prevBlock.Finalization.Height == 0 { + // Pop the initBlock inserted when bootstrapping. 
+ cc.pendingBlocks = cc.pendingBlocks[1:] + } + cc.prevBlock = prevBlock + for _, b := range toPending { + heap.Push(cc.pendingFinalizedBlocks, b) + } + return confirmed +} + +func (cc *compactionChain) processBlockRandomnessResult( + rand *types.BlockRandomnessResult) error { + if !cc.blockRegistered(rand.BlockHash) { + return ErrBlockNotRegistered + } + cc.lock.Lock() + defer cc.lock.Unlock() + cc.blocks[rand.BlockHash].Finalization.Randomness = rand.Randomness + return nil +} + +func (cc *compactionChain) lastBlock() *types.Block { + cc.lock.RLock() + defer cc.lock.RUnlock() + return cc.prevBlock +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/configuration-chain.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/configuration-chain.go new file mode 100644 index 000000000..267635155 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/configuration-chain.go @@ -0,0 +1,305 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "sync" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Errors for configuration chain.. +var ( + ErrDKGNotRegistered = fmt.Errorf( + "not yet registered in DKG protocol") + ErrTSigAlreadyRunning = fmt.Errorf( + "tsig is already running") + ErrDKGNotReady = fmt.Errorf( + "DKG is not ready") +) + +type configurationChain struct { + ID types.NodeID + recv dkgReceiver + gov Governance + dkg *dkgProtocol + logger common.Logger + dkgLock sync.RWMutex + dkgSigner map[uint64]*dkgShareSecret + gpk map[uint64]*DKGGroupPublicKey + dkgResult sync.RWMutex + tsig map[common.Hash]*tsigProtocol + tsigTouched map[common.Hash]struct{} + tsigReady *sync.Cond + // TODO(jimmy-dexon): add timeout to pending psig. 
+ pendingPsig map[common.Hash][]*types.DKGPartialSignature + prevHash common.Hash +} + +func newConfigurationChain( + ID types.NodeID, + recv dkgReceiver, + gov Governance, + logger common.Logger) *configurationChain { + return &configurationChain{ + ID: ID, + recv: recv, + gov: gov, + logger: logger, + dkgSigner: make(map[uint64]*dkgShareSecret), + gpk: make(map[uint64]*DKGGroupPublicKey), + tsig: make(map[common.Hash]*tsigProtocol), + tsigTouched: make(map[common.Hash]struct{}), + tsigReady: sync.NewCond(&sync.Mutex{}), + pendingPsig: make(map[common.Hash][]*types.DKGPartialSignature), + } +} + +func (cc *configurationChain) registerDKG(round uint64, threshold int) { + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + cc.dkg = newDKGProtocol( + cc.ID, + cc.recv, + round, + threshold) +} + +func (cc *configurationChain) runDKG(round uint64) error { + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if cc.dkg == nil || cc.dkg.round != round { + return ErrDKGNotRegistered + } + if func() bool { + cc.dkgResult.RLock() + defer cc.dkgResult.RUnlock() + _, exist := cc.gpk[round] + return exist + }() { + return nil + } + + ticker := newTicker(cc.gov, round, TickerDKG) + cc.dkgLock.Unlock() + <-ticker.Tick() + cc.dkgLock.Lock() + // Phase 2(T = 0): Exchange DKG secret key share. + cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round) + cc.dkg.processMasterPublicKeys(cc.gov.DKGMasterPublicKeys(round)) + // Phase 3(T = 0~λ): Propose complaint. + // Propose complaint is done in `processMasterPublicKeys`. + cc.dkgLock.Unlock() + <-ticker.Tick() + cc.dkgLock.Lock() + // Phase 4(T = λ): Propose nack complaints. + cc.dkg.proposeNackComplaints() + cc.dkgLock.Unlock() + <-ticker.Tick() + cc.dkgLock.Lock() + // Phase 5(T = 2λ): Propose Anti nack complaint. + cc.logger.Debug("Calling Governance.DKGComplaints", "round", round) + cc.dkg.processNackComplaints(cc.gov.DKGComplaints(round)) + cc.dkgLock.Unlock() + <-ticker.Tick() + cc.dkgLock.Lock() + // Phase 6(T = 3λ): Rebroadcast anti nack complaint. + // Rebroadcast is done in `processPrivateShare`. + cc.dkgLock.Unlock() + <-ticker.Tick() + cc.dkgLock.Lock() + // Phase 7(T = 4λ): Enforce complaints and nack complaints. + cc.logger.Debug("Calling Governance.DKGComplaints", "round", round) + cc.dkg.enforceNackComplaints(cc.gov.DKGComplaints(round)) + // Enforce complaint is done in `processPrivateShare`. + // Phase 8(T = 5λ): DKG finalize. + cc.dkgLock.Unlock() + <-ticker.Tick() + cc.dkgLock.Lock() + cc.dkg.proposeFinalize() + // Phase 9(T = 6λ): DKG is ready. + cc.dkgLock.Unlock() + <-ticker.Tick() + cc.dkgLock.Lock() + // Normally, IsDKGFinal would return true here. Use this for in case of + // unexpected network fluctuation and ensure the robustness of DKG protocol. + cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round) + for !cc.gov.IsDKGFinal(round) { + cc.logger.Info("DKG is not ready yet. 
Try again later...", + "nodeID", cc.ID) + time.Sleep(500 * time.Millisecond) + } + cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round) + cc.logger.Debug("Calling Governance.DKGComplaints", "round", round) + gpk, err := NewDKGGroupPublicKey(round, + cc.gov.DKGMasterPublicKeys(round), + cc.gov.DKGComplaints(round), + cc.dkg.threshold) + if err != nil { + return err + } + signer, err := cc.dkg.recoverShareSecret(gpk.qualifyIDs) + if err != nil { + return err + } + qualifies := "" + for nID := range gpk.qualifyNodeIDs { + qualifies += fmt.Sprintf("%s ", nID.String()[:6]) + } + cc.logger.Info("Qualify Nodes", + "nodeID", cc.ID, + "round", round, + "count", len(gpk.qualifyIDs), + "qualifies", qualifies) + cc.dkgResult.Lock() + defer cc.dkgResult.Unlock() + cc.dkgSigner[round] = signer + cc.gpk[round] = gpk + return nil +} + +func (cc *configurationChain) preparePartialSignature( + round uint64, hash common.Hash) (*types.DKGPartialSignature, error) { + signer, exist := func() (*dkgShareSecret, bool) { + cc.dkgResult.RLock() + defer cc.dkgResult.RUnlock() + signer, exist := cc.dkgSigner[round] + return signer, exist + }() + if !exist { + return nil, ErrDKGNotReady + } + return &types.DKGPartialSignature{ + ProposerID: cc.ID, + Round: round, + Hash: hash, + PartialSignature: signer.sign(hash), + }, nil +} + +func (cc *configurationChain) touchTSigHash(hash common.Hash) (first bool) { + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + _, exist := cc.tsigTouched[hash] + cc.tsigTouched[hash] = struct{}{} + return !exist +} + +func (cc *configurationChain) runTSig( + round uint64, hash common.Hash) ( + crypto.Signature, error) { + gpk, exist := func() (*DKGGroupPublicKey, bool) { + cc.dkgResult.RLock() + defer cc.dkgResult.RUnlock() + gpk, exist := cc.gpk[round] + return gpk, exist + }() + if !exist { + return crypto.Signature{}, ErrDKGNotReady + } + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + if _, exist := cc.tsig[hash]; exist { + return crypto.Signature{}, ErrTSigAlreadyRunning + } + cc.tsig[hash] = newTSigProtocol(gpk, hash) + pendingPsig := cc.pendingPsig[hash] + delete(cc.pendingPsig, hash) + go func() { + for _, psig := range pendingPsig { + if err := cc.processPartialSignature(psig); err != nil { + cc.logger.Error("failed to process partial signature", + "nodeID", cc.ID, + "error", err) + } + } + }() + var signature crypto.Signature + var err error + for func() bool { + signature, err = cc.tsig[hash].signature() + return err == ErrNotEnoughtPartialSignatures + }() { + cc.tsigReady.Wait() + } + delete(cc.tsig, hash) + delete(cc.tsigTouched, hash) + if err != nil { + return crypto.Signature{}, err + } + return signature, nil +} + +func (cc *configurationChain) runBlockTSig( + round uint64, hash common.Hash) (crypto.Signature, error) { + sig, err := cc.runTSig(round, hash) + if err != nil { + return crypto.Signature{}, err + } + cc.logger.Info("Block TSIG", + "nodeID", cc.ID, + "round", round, + "signature", sig) + return sig, nil +} + +func (cc *configurationChain) runCRSTSig( + round uint64, crs common.Hash) ([]byte, error) { + sig, err := cc.runTSig(round, crs) + cc.logger.Info("CRS", + "nodeID", cc.ID, + "round", round+1, + "signature", sig) + return sig.Signature[:], err +} + +func (cc *configurationChain) processPrivateShare( + prvShare *types.DKGPrivateShare) error { + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if cc.dkg == nil { + return nil + } + return cc.dkg.processPrivateShare(prvShare) +} + +func (cc *configurationChain) 
processPartialSignature( + psig *types.DKGPartialSignature) error { + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + if _, exist := cc.tsig[psig.Hash]; !exist { + ok, err := verifyDKGPartialSignatureSignature(psig) + if err != nil { + return err + } + if !ok { + return ErrIncorrectPartialSignatureSignature + } + cc.pendingPsig[psig.Hash] = append(cc.pendingPsig[psig.Hash], psig) + return nil + } + if err := cc.tsig[psig.Hash].processPartialSignature(psig); err != nil { + return err + } + cc.tsigReady.Broadcast() + return nil +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus-timestamp.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus-timestamp.go new file mode 100644 index 000000000..49270d491 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus-timestamp.go @@ -0,0 +1,129 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "errors" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// consensusTimestamp is for Concensus Timestamp Algorithm. +type consensusTimestamp struct { + chainTimestamps []time.Time + + // This part keeps configs for each round. + numChainsOfRounds []uint32 + numChainsBase uint64 + + // dMoment represents the genesis time. + dMoment time.Time +} + +var ( + // ErrTimestampNotIncrease would be reported if the timestamp is not strickly + // increasing on the same chain. + ErrTimestampNotIncrease = errors.New("timestamp is not increasing") + // ErrNoRoundConfig for no round config found. + ErrNoRoundConfig = errors.New("no round config found") +) + +// newConsensusTimestamp creates timestamper object. +func newConsensusTimestamp( + dMoment time.Time, round uint64, numChains uint32) *consensusTimestamp { + ts := make([]time.Time, 0, numChains) + for i := uint32(0); i < numChains; i++ { + ts = append(ts, dMoment) + } + return &consensusTimestamp{ + numChainsOfRounds: []uint32{numChains}, + numChainsBase: round, + dMoment: dMoment, + chainTimestamps: ts, + } +} + +// appendConfig appends a configuration for upcoming round. When you append +// a config for round R, next time you can only append the config for round R+1. 
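processBlocks below assigns each delivered block a finalization timestamp via getMedianTime over the per-chain timestamps. getMedianTime itself is defined elsewhere in the package, so the sketch that follows is only one plausible reading of it: a median over sorted times, using a local sorter equivalent to the ByTime helper from the common package:

package main

import (
	"fmt"
	"sort"
	"time"
)

type byTime []time.Time

func (t byTime) Len() int           { return len(t) }
func (t byTime) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
func (t byTime) Less(i, j int) bool { return t[i].Before(t[j]) }

// medianTime returns the middle element of the sorted timestamps
// (the lower middle when the count is even).
func medianTime(ts []time.Time) time.Time {
	sorted := append([]time.Time(nil), ts...)
	sort.Sort(byTime(sorted))
	return sorted[(len(sorted)-1)/2]
}

func main() {
	base := time.Date(2018, 9, 1, 0, 0, 0, 0, time.UTC)
	ts := []time.Time{
		base.Add(3 * time.Second),
		base.Add(1 * time.Second),
		base.Add(2 * time.Second),
	}
	fmt.Println(medianTime(ts)) // base + 2s
}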
+func (ct *consensusTimestamp) appendConfig( + round uint64, config *types.Config) error { + + if round != uint64(len(ct.numChainsOfRounds))+ct.numChainsBase { + return ErrRoundNotIncreasing + } + ct.numChainsOfRounds = append(ct.numChainsOfRounds, config.NumChains) + return nil +} + +func (ct *consensusTimestamp) resizeChainTimetamps(numChain uint32) { + l := uint32(len(ct.chainTimestamps)) + if numChain > l { + for i := l; i < numChain; i++ { + ct.chainTimestamps = append(ct.chainTimestamps, ct.dMoment) + } + } else if numChain < l { + ct.chainTimestamps = ct.chainTimestamps[:numChain] + } +} + +// ProcessBlocks is the entry function. +func (ct *consensusTimestamp) processBlocks(blocks []*types.Block) (err error) { + for _, block := range blocks { + // Rounds might interleave within rounds if no configuration change + // occurs. And it is limited to one round, that is, round r can only + // interleave with r-1 and r+1. + round := block.Position.Round + if ct.numChainsBase == round || ct.numChainsBase+1 == round { + // Normal case, no need to modify chainTimestamps. + } else if ct.numChainsBase+2 == round { + if len(ct.numChainsOfRounds) < 2 { + return ErrNoRoundConfig + } + if ct.numChainsOfRounds[0] > ct.numChainsOfRounds[1] { + ct.resizeChainTimetamps(ct.numChainsOfRounds[0]) + } else { + ct.resizeChainTimetamps(ct.numChainsOfRounds[1]) + } + ct.numChainsBase++ + ct.numChainsOfRounds = ct.numChainsOfRounds[1:] + } else { + // Error if round < base or round > base + 2. + return ErrInvalidRoundID + } + ts := ct.chainTimestamps[:ct.numChainsOfRounds[round-ct.numChainsBase]] + if block.Finalization.Timestamp, err = getMedianTime(ts); err != nil { + return + } + if block.Timestamp.Before(ct.chainTimestamps[block.Position.ChainID]) { + return ErrTimestampNotIncrease + } + ct.chainTimestamps[block.Position.ChainID] = block.Timestamp + } + return +} + +func (ct *consensusTimestamp) isSynced() bool { + numChain := ct.numChainsOfRounds[0] + for i := uint32(0); i < numChain; i++ { + if ct.chainTimestamps[i].Equal(ct.dMoment) { + return false + } + } + return true +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus.go new file mode 100644 index 000000000..c7bef4bb1 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus.go @@ -0,0 +1,920 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. 
+ +package core + +import ( + "context" + "encoding/hex" + "fmt" + "sync" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/blockdb" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Errors for consensus core. +var ( + ErrProposerNotInNodeSet = fmt.Errorf( + "proposer is not in node set") + ErrIncorrectHash = fmt.Errorf( + "hash of block is incorrect") + ErrIncorrectSignature = fmt.Errorf( + "signature of block is incorrect") + ErrGenesisBlockNotEmpty = fmt.Errorf( + "genesis block should be empty") + ErrUnknownBlockProposed = fmt.Errorf( + "unknown block is proposed") + ErrIncorrectAgreementResultPosition = fmt.Errorf( + "incorrect agreement result position") + ErrNotEnoughVotes = fmt.Errorf( + "not enought votes") + ErrIncorrectVoteProposer = fmt.Errorf( + "incorrect vote proposer") + ErrIncorrectBlockRandomnessResult = fmt.Errorf( + "incorrect block randomness result") +) + +// consensusBAReceiver implements agreementReceiver. +type consensusBAReceiver struct { + // TODO(mission): consensus would be replaced by lattice and network. + consensus *Consensus + agreementModule *agreement + chainID uint32 + changeNotaryTime time.Time + round uint64 + restartNotary chan bool +} + +func (recv *consensusBAReceiver) ProposeVote(vote *types.Vote) { + if err := recv.agreementModule.prepareVote(vote); err != nil { + recv.consensus.logger.Error("Failed to prepare vote", "error", err) + return + } + go func() { + if err := recv.agreementModule.processVote(vote); err != nil { + recv.consensus.logger.Error("Failed to process vote", "error", err) + return + } + recv.consensus.logger.Debug("Calling Network.BroadcastVote", + "vote", vote) + recv.consensus.network.BroadcastVote(vote) + }() +} + +func (recv *consensusBAReceiver) ProposeBlock() common.Hash { + block := recv.consensus.proposeBlock(recv.chainID, recv.round) + recv.consensus.baModules[recv.chainID].addCandidateBlock(block) + if err := recv.consensus.preProcessBlock(block); err != nil { + recv.consensus.logger.Error("Failed to pre-process block", "error", err) + return common.Hash{} + } + recv.consensus.logger.Debug("Calling Network.BroadcastBlock", "block", block) + recv.consensus.network.BroadcastBlock(block) + return block.Hash +} + +func (recv *consensusBAReceiver) ConfirmBlock( + hash common.Hash, votes map[types.NodeID]*types.Vote) { + var block *types.Block + if (hash == common.Hash{}) { + recv.consensus.logger.Info("Empty block is confirmed", + "position", recv.agreementModule.agreementID()) + var err error + block, err = recv.consensus.proposeEmptyBlock(recv.chainID) + if err != nil { + recv.consensus.logger.Error("Propose empty block failed", "error", err) + return + } + } else { + var exist bool + block, exist = recv.consensus.baModules[recv.chainID]. 
+ findCandidateBlock(hash) + if !exist { + recv.consensus.logger.Error("Unknown block confirmed", "hash", hash) + return + } + } + recv.consensus.ccModule.registerBlock(block) + voteList := make([]types.Vote, 0, len(votes)) + for _, vote := range votes { + if vote.BlockHash != hash { + continue + } + voteList = append(voteList, *vote) + } + result := &types.AgreementResult{ + BlockHash: block.Hash, + Position: block.Position, + Votes: voteList, + } + recv.consensus.logger.Debug("Calling Network.BroadcastAgreementResult", + "result", result) + recv.consensus.network.BroadcastAgreementResult(result) + if err := recv.consensus.processBlock(block); err != nil { + recv.consensus.logger.Error("Failed to process block", "error", err) + return + } + if block.Timestamp.After(recv.changeNotaryTime) { + recv.round++ + recv.restartNotary <- true + } else { + recv.restartNotary <- false + } +} + +// consensusDKGReceiver implements dkgReceiver. +type consensusDKGReceiver struct { + ID types.NodeID + gov Governance + authModule *Authenticator + nodeSetCache *NodeSetCache + cfgModule *configurationChain + network Network + logger common.Logger +} + +// ProposeDKGComplaint proposes a DKGComplaint. +func (recv *consensusDKGReceiver) ProposeDKGComplaint( + complaint *types.DKGComplaint) { + if err := recv.authModule.SignDKGComplaint(complaint); err != nil { + recv.logger.Error("Failed to sign DKG complaint", "error", err) + return + } + recv.logger.Debug("Calling Governace.AddDKGComplaint", + "complaint", complaint) + recv.gov.AddDKGComplaint(complaint.Round, complaint) +} + +// ProposeDKGMasterPublicKey propose a DKGMasterPublicKey. +func (recv *consensusDKGReceiver) ProposeDKGMasterPublicKey( + mpk *types.DKGMasterPublicKey) { + if err := recv.authModule.SignDKGMasterPublicKey(mpk); err != nil { + recv.logger.Error("Failed to sign DKG master public key", "error", err) + return + } + recv.logger.Debug("Calling Governance.AddDKGMasterPublicKey", "key", mpk) + recv.gov.AddDKGMasterPublicKey(mpk.Round, mpk) +} + +// ProposeDKGPrivateShare propose a DKGPrivateShare. +func (recv *consensusDKGReceiver) ProposeDKGPrivateShare( + prv *types.DKGPrivateShare) { + if err := recv.authModule.SignDKGPrivateShare(prv); err != nil { + recv.logger.Error("Failed to sign DKG private share", "error", err) + return + } + receiverPubKey, exists := recv.nodeSetCache.GetPublicKey(prv.ReceiverID) + if !exists { + recv.logger.Error("Public key for receiver not found", + "receiver", prv.ReceiverID.String()[:6]) + return + } + if prv.ReceiverID == recv.ID { + go func() { + if err := recv.cfgModule.processPrivateShare(prv); err != nil { + recv.logger.Error("Failed to process self private share", "prvShare", prv) + } + }() + } else { + recv.logger.Debug("Calling Network.SendDKGPrivateShare", + "receiver", hex.EncodeToString(receiverPubKey.Bytes())) + recv.network.SendDKGPrivateShare(receiverPubKey, prv) + } +} + +// ProposeDKGAntiNackComplaint propose a DKGPrivateShare as an anti complaint. +func (recv *consensusDKGReceiver) ProposeDKGAntiNackComplaint( + prv *types.DKGPrivateShare) { + if prv.ProposerID == recv.ID { + if err := recv.authModule.SignDKGPrivateShare(prv); err != nil { + recv.logger.Error("Failed sign DKG private share", "error", err) + return + } + } + recv.logger.Debug("Calling Network.BroadcastDKGPrivateShare", "share", prv) + recv.network.BroadcastDKGPrivateShare(prv) +} + +// ProposeDKGFinalize propose a DKGFinalize message. 
+func (recv *consensusDKGReceiver) ProposeDKGFinalize(final *types.DKGFinalize) { + if err := recv.authModule.SignDKGFinalize(final); err != nil { + recv.logger.Error("Faield to sign DKG finalize", "error", err) + return + } + recv.logger.Debug("Calling Governance.AddDKGFinalize", "final", final) + recv.gov.AddDKGFinalize(final.Round, final) +} + +// Consensus implements DEXON Consensus algorithm. +type Consensus struct { + // Node Info. + ID types.NodeID + authModule *Authenticator + currentConfig *types.Config + + // Modules. + nbModule *nonBlocking + + // BA. + baModules []*agreement + receivers []*consensusBAReceiver + + // DKG. + dkgRunning int32 + dkgReady *sync.Cond + cfgModule *configurationChain + + // Dexon consensus v1's modules. + lattice *Lattice + ccModule *compactionChain + + // Interfaces. + db blockdb.BlockDatabase + gov Governance + network Network + tickerObj Ticker + + // Misc. + dMoment time.Time + nodeSetCache *NodeSetCache + round uint64 + roundToNotify uint64 + lock sync.RWMutex + ctx context.Context + ctxCancel context.CancelFunc + event *common.Event + logger common.Logger +} + +// NewConsensus construct an Consensus instance. +func NewConsensus( + dMoment time.Time, + app Application, + gov Governance, + db blockdb.BlockDatabase, + network Network, + prv crypto.PrivateKey, + logger common.Logger) *Consensus { + + // TODO(w): load latest blockHeight from DB, and use config at that height. + var ( + round uint64 + // round 0 and 1 are decided at beginning. + roundToNotify = round + 2 + ) + logger.Debug("Calling Governance.Configuration", "round", round) + config := gov.Configuration(round) + nodeSetCache := NewNodeSetCache(gov) + logger.Debug("Calling Governance.CRS", "round", round) + crs := gov.CRS(round) + // Setup acking by information returned from Governace. + nodes, err := nodeSetCache.GetNodeSet(round) + if err != nil { + panic(err) + } + // Setup auth module. + authModule := NewAuthenticator(prv) + // Check if the application implement Debug interface. + debugApp, _ := app.(Debug) + // Setup nonblocking module. + nbModule := newNonBlocking(app, debugApp) + // Init lattice. + lattice := NewLattice( + dMoment, config, authModule, nbModule, nbModule, db, logger) + // Init configuration chain. + ID := types.NewNodeID(prv.PublicKey()) + recv := &consensusDKGReceiver{ + ID: ID, + gov: gov, + authModule: authModule, + nodeSetCache: nodeSetCache, + network: network, + logger: logger, + } + cfgModule := newConfigurationChain( + ID, + recv, + gov, + logger) + recv.cfgModule = cfgModule + // Construct Consensus instance. + con := &Consensus{ + ID: ID, + currentConfig: config, + ccModule: newCompactionChain(gov), + lattice: lattice, + nbModule: nbModule, + gov: gov, + db: db, + network: network, + tickerObj: newTicker(gov, round, TickerBA), + dkgReady: sync.NewCond(&sync.Mutex{}), + cfgModule: cfgModule, + dMoment: dMoment, + nodeSetCache: nodeSetCache, + authModule: authModule, + event: common.NewEvent(), + logger: logger, + roundToNotify: roundToNotify, + } + + con.baModules = make([]*agreement, config.NumChains) + con.receivers = make([]*consensusBAReceiver, config.NumChains) + for i := uint32(0); i < config.NumChains; i++ { + chainID := i + recv := &consensusBAReceiver{ + consensus: con, + chainID: chainID, + restartNotary: make(chan bool, 1), + } + agreementModule := newAgreement( + con.ID, + recv, + nodes.IDs, + newLeaderSelector(crs), + con.authModule, + ) + // Hacky way to make agreement module self contained. 
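+		// newAgreement needs the receiver, while the receiver's ProposeVote
+		// path calls back into the agreement module, so the back reference is
+		// wired up only after both objects exist.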
+ recv.agreementModule = agreementModule + recv.changeNotaryTime = dMoment + con.baModules[chainID] = agreementModule + con.receivers[chainID] = recv + } + return con +} + +// Run starts running DEXON Consensus. +func (con *Consensus) Run(initBlock *types.Block) { + // Setup context. + con.ctx, con.ctxCancel = context.WithCancel(context.Background()) + con.ccModule.init(initBlock) + // TODO(jimmy-dexon): change AppendConfig to add config for specific round. + for i := uint64(0); i < initBlock.Position.Round; i++ { + con.logger.Debug("Calling Governance.Configuration", "round", i+1) + cfg := con.gov.Configuration(i + 1) + if err := con.lattice.AppendConfig(i+1, cfg); err != nil { + panic(err) + } + } + con.logger.Debug("Calling Network.ReceiveChan") + go con.processMsg(con.network.ReceiveChan()) + // Sleep until dMoment come. + time.Sleep(con.dMoment.Sub(time.Now().UTC())) + con.cfgModule.registerDKG(con.round, int(con.currentConfig.DKGSetSize)/3+1) + con.event.RegisterTime(con.dMoment.Add(con.currentConfig.RoundInterval/4), + func(time.Time) { + con.runDKGTSIG(con.round) + }) + round1 := uint64(1) + con.logger.Debug("Calling Governance.Configuration", "round", round1) + con.lattice.AppendConfig(round1, con.gov.Configuration(round1)) + con.initialRound(con.dMoment) + ticks := make([]chan struct{}, 0, con.currentConfig.NumChains) + for i := uint32(0); i < con.currentConfig.NumChains; i++ { + tick := make(chan struct{}) + ticks = append(ticks, tick) + go con.runBA(i, tick) + } + + // Reset ticker. + <-con.tickerObj.Tick() + <-con.tickerObj.Tick() + for { + <-con.tickerObj.Tick() + for _, tick := range ticks { + go func(tick chan struct{}) { tick <- struct{}{} }(tick) + } + } +} + +func (con *Consensus) runBA(chainID uint32, tick <-chan struct{}) { + // TODO(jimmy-dexon): move this function inside agreement. + agreement := con.baModules[chainID] + recv := con.receivers[chainID] + recv.restartNotary <- true + nIDs := make(map[types.NodeID]struct{}) + // Reset ticker + <-tick +BALoop: + for { + select { + case <-con.ctx.Done(): + break BALoop + default: + } + select { + case newNotary := <-recv.restartNotary: + if newNotary { + recv.changeNotaryTime = + recv.changeNotaryTime.Add(con.currentConfig.RoundInterval) + nodes, err := con.nodeSetCache.GetNodeSet(recv.round) + if err != nil { + panic(err) + } + con.logger.Debug("Calling Governance.Configuration", + "round", recv.round) + con.logger.Debug("Calling Governance.CRS", "round", recv.round) + nIDs = nodes.GetSubSet( + int(con.gov.Configuration(recv.round).NotarySetSize), + types.NewNotarySetTarget(con.gov.CRS(recv.round), chainID)) + } + nextPos := con.lattice.NextPosition(chainID) + nextPos.Round = recv.round + agreement.restart(nIDs, nextPos) + default: + } + err := agreement.nextState() + if err != nil { + con.logger.Error("Failed to proceed to next state", + "nodeID", con.ID.String(), + "error", err) + break BALoop + } + for i := 0; i < agreement.clocks(); i++ { + // Priority select for agreement.done(). + select { + case <-agreement.done(): + continue BALoop + default: + } + select { + case <-agreement.done(): + continue BALoop + case <-tick: + } + } + } +} + +// runDKGTSIG starts running DKG+TSIG protocol. 
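+//
+// The goroutine below runs the full sequence for one round: run the DKG for
+// the round, hash the round's configuration block, prepare and sign a partial
+// signature over that hash, feed it to the configuration chain, broadcast it,
+// and finally join the block threshold signature via runBlockTSig. A warning
+// is logged if the whole procedure takes more than half of RoundInterval.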
+func (con *Consensus) runDKGTSIG(round uint64) { + con.dkgReady.L.Lock() + defer con.dkgReady.L.Unlock() + if con.dkgRunning != 0 { + return + } + con.dkgRunning = 1 + go func() { + startTime := time.Now().UTC() + defer func() { + con.dkgReady.L.Lock() + defer con.dkgReady.L.Unlock() + con.dkgReady.Broadcast() + con.dkgRunning = 2 + DKGTime := time.Now().Sub(startTime) + if DKGTime.Nanoseconds() >= + con.currentConfig.RoundInterval.Nanoseconds()/2 { + con.logger.Warn("Your computer cannot finish DKG on time!", + "nodeID", con.ID.String()) + } + }() + if err := con.cfgModule.runDKG(round); err != nil { + panic(err) + } + nodes, err := con.nodeSetCache.GetNodeSet(round) + if err != nil { + panic(err) + } + con.logger.Debug("Calling Governance.Configuration", "round", round) + hash := HashConfigurationBlock( + nodes.IDs, + con.gov.Configuration(round), + common.Hash{}, + con.cfgModule.prevHash) + psig, err := con.cfgModule.preparePartialSignature( + round, hash) + if err != nil { + panic(err) + } + if err = con.authModule.SignDKGPartialSignature(psig); err != nil { + panic(err) + } + if err = con.cfgModule.processPartialSignature(psig); err != nil { + panic(err) + } + con.logger.Debug("Calling Network.BroadcastDKGPartialSignature", + "proposer", psig.ProposerID, + "round", psig.Round, + "hash", psig.Hash) + con.network.BroadcastDKGPartialSignature(psig) + if _, err = con.cfgModule.runBlockTSig(round, hash); err != nil { + panic(err) + } + }() +} + +func (con *Consensus) runCRS() { + // Start running next round CRS. + con.logger.Debug("Calling Governance.CRS", "round", con.round) + psig, err := con.cfgModule.preparePartialSignature( + con.round, con.gov.CRS(con.round)) + if err != nil { + con.logger.Error("Failed to prepare partial signature", "error", err) + } else if err = con.authModule.SignDKGPartialSignature(psig); err != nil { + con.logger.Error("Failed to sign DKG partial signature", "error", err) + } else if err = con.cfgModule.processPartialSignature(psig); err != nil { + con.logger.Error("Failed to process partial signature", "error", err) + } else { + con.logger.Debug("Calling Network.BroadcastDKGPartialSignature", + "proposer", psig.ProposerID, + "round", psig.Round, + "hash", psig.Hash) + con.network.BroadcastDKGPartialSignature(psig) + con.logger.Debug("Calling Governance.CRS", "round", con.round) + crs, err := con.cfgModule.runCRSTSig(con.round, con.gov.CRS(con.round)) + if err != nil { + con.logger.Error("Failed to run CRS Tsig", "error", err) + } else { + con.logger.Debug("Calling Governance.ProposeCRS", + "round", con.round+1, + "crs", crs) + con.gov.ProposeCRS(con.round+1, crs) + } + } +} + +func (con *Consensus) initialRound(startTime time.Time) { + select { + case <-con.ctx.Done(): + return + default: + } + con.logger.Debug("Calling Governance.Configuration", "round", con.round) + con.currentConfig = con.gov.Configuration(con.round) + + con.event.RegisterTime(startTime.Add(con.currentConfig.RoundInterval/2), + func(time.Time) { + go func() { + con.runCRS() + ticker := newTicker(con.gov, con.round, TickerDKG) + <-ticker.Tick() + // Normally, gov.CRS would return non-nil. Use this for in case of + // unexpected network fluctuation and ensure the robustness. + for (con.gov.CRS(con.round+1) == common.Hash{}) { + con.logger.Info("CRS is not ready yet. 
Try again later...", + "nodeID", con.ID) + time.Sleep(500 * time.Millisecond) + } + con.cfgModule.registerDKG( + con.round+1, int(con.currentConfig.DKGSetSize/3)+1) + }() + }) + con.event.RegisterTime(startTime.Add(con.currentConfig.RoundInterval*2/3), + func(time.Time) { + func() { + con.dkgReady.L.Lock() + defer con.dkgReady.L.Unlock() + con.dkgRunning = 0 + }() + con.runDKGTSIG(con.round + 1) + }) + con.event.RegisterTime(startTime.Add(con.currentConfig.RoundInterval), + func(time.Time) { + // Change round. + con.round++ + con.logger.Debug("Calling Governance.Configuration", + "round", con.round+1) + con.lattice.AppendConfig(con.round+1, con.gov.Configuration(con.round+1)) + con.initialRound(startTime.Add(con.currentConfig.RoundInterval)) + }) +} + +// Stop the Consensus core. +func (con *Consensus) Stop() { + for _, a := range con.baModules { + a.stop() + } + con.event.Reset() + con.ctxCancel() +} + +func (con *Consensus) processMsg(msgChan <-chan interface{}) { + for { + var msg interface{} + select { + case msg = <-msgChan: + case <-con.ctx.Done(): + return + } + + switch val := msg.(type) { + case *types.Block: + // For sync mode. + if val.IsFinalized() { + if err := con.processFinalizedBlock(val); err != nil { + con.logger.Error("Failed to process finalized block", + "error", err) + } + } else { + if err := con.preProcessBlock(val); err != nil { + con.logger.Error("Failed to pre process block", + "error", err) + } + } + case *types.Vote: + if err := con.ProcessVote(val); err != nil { + con.logger.Error("Failed to process vote", + "error", err) + } + case *types.AgreementResult: + if err := con.ProcessAgreementResult(val); err != nil { + con.logger.Error("Failed to process agreement result", + "error", err) + } + case *types.BlockRandomnessResult: + if err := con.ProcessBlockRandomnessResult(val); err != nil { + con.logger.Error("Failed to process block randomness result", + "error", err) + } + case *types.DKGPrivateShare: + if err := con.cfgModule.processPrivateShare(val); err != nil { + con.logger.Error("Failed to process private share", + "error", err) + } + + case *types.DKGPartialSignature: + if err := con.cfgModule.processPartialSignature(val); err != nil { + con.logger.Error("Failed to process partial signature", + "error", err) + } + } + } +} + +func (con *Consensus) proposeBlock(chainID uint32, round uint64) *types.Block { + block := &types.Block{ + Position: types.Position{ + ChainID: chainID, + Round: round, + }, + } + if err := con.prepareBlock(block, time.Now().UTC()); err != nil { + con.logger.Error("Failed to prepare block", "error", err) + return nil + } + return block +} + +func (con *Consensus) proposeEmptyBlock( + chainID uint32) (*types.Block, error) { + block := &types.Block{ + Position: types.Position{ + ChainID: chainID, + }, + } + if err := con.lattice.PrepareEmptyBlock(block); err != nil { + return nil, err + } + return block, nil +} + +// ProcessVote is the entry point to submit ont vote to a Consensus instance. +func (con *Consensus) ProcessVote(vote *types.Vote) (err error) { + v := vote.Clone() + err = con.baModules[v.Position.ChainID].processVote(v) + return err +} + +// ProcessAgreementResult processes the randomness request. 
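+//
+// The vote count must be strictly greater than NotarySetSize/3*2 (integer
+// arithmetic). For example, with NotarySetSize = 10 the threshold is
+// 10/3*2 = 6, so at least 7 matching votes are required before the result is
+// accepted and re-broadcast; otherwise ErrNotEnoughVotes is returned.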
+func (con *Consensus) ProcessAgreementResult( + rand *types.AgreementResult) error { + if rand.Position.Round == 0 { + return nil + } + if !con.ccModule.blockRegistered(rand.BlockHash) { + return nil + } + if DiffUint64(con.round, rand.Position.Round) > 1 { + return nil + } + if len(rand.Votes) <= int(con.currentConfig.NotarySetSize/3*2) { + return ErrNotEnoughVotes + } + if rand.Position.ChainID >= con.currentConfig.NumChains { + return ErrIncorrectAgreementResultPosition + } + notarySet, err := con.nodeSetCache.GetNotarySet( + rand.Position.Round, rand.Position.ChainID) + if err != nil { + return err + } + for _, vote := range rand.Votes { + if _, exist := notarySet[vote.ProposerID]; !exist { + return ErrIncorrectVoteProposer + } + ok, err := verifyVoteSignature(&vote) + if err != nil { + return err + } + if !ok { + return ErrIncorrectVoteSignature + } + } + // Sanity check done. + if !con.cfgModule.touchTSigHash(rand.BlockHash) { + return nil + } + con.logger.Debug("Calling Network.BroadcastAgreementResult", "result", rand) + con.network.BroadcastAgreementResult(rand) + dkgSet, err := con.nodeSetCache.GetDKGSet(rand.Position.Round) + if err != nil { + return err + } + if _, exist := dkgSet[con.ID]; !exist { + return nil + } + psig, err := con.cfgModule.preparePartialSignature(rand.Position.Round, rand.BlockHash) + if err != nil { + return err + } + if err = con.authModule.SignDKGPartialSignature(psig); err != nil { + return err + } + if err = con.cfgModule.processPartialSignature(psig); err != nil { + return err + } + con.logger.Debug("Calling Network.BroadcastDKGPartialSignature", + "proposer", psig.ProposerID, + "round", psig.Round, + "hash", psig.Hash) + con.network.BroadcastDKGPartialSignature(psig) + go func() { + tsig, err := con.cfgModule.runTSig(rand.Position.Round, rand.BlockHash) + if err != nil { + if err != ErrTSigAlreadyRunning { + con.logger.Error("Faield to run TSIG", "error", err) + } + return + } + result := &types.BlockRandomnessResult{ + BlockHash: rand.BlockHash, + Position: rand.Position, + Randomness: tsig.Signature, + } + if err := con.ProcessBlockRandomnessResult(result); err != nil { + con.logger.Error("Failed to process randomness result", + "error", err) + return + } + }() + return nil +} + +// ProcessBlockRandomnessResult processes the randomness result. +func (con *Consensus) ProcessBlockRandomnessResult( + rand *types.BlockRandomnessResult) error { + if rand.Position.Round == 0 { + return nil + } + if !con.ccModule.blockRegistered(rand.BlockHash) { + return nil + } + round := rand.Position.Round + v, ok, err := con.ccModule.tsigVerifier.UpdateAndGet(round) + if err != nil { + return err + } + if !ok { + return nil + } + if !v.VerifySignature( + rand.BlockHash, crypto.Signature{Signature: rand.Randomness}) { + return ErrIncorrectBlockRandomnessResult + } + con.logger.Debug("Calling Network.BroadcastRandomnessResult", + "hash", rand.BlockHash, + "position", rand.Position, + "randomness", hex.EncodeToString(rand.Randomness)) + con.network.BroadcastRandomnessResult(rand) + if err := con.ccModule.processBlockRandomnessResult(rand); err != nil { + if err != ErrBlockNotRegistered { + return err + } + } + return nil +} + +// preProcessBlock performs Byzantine Agreement on the block. 
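+//
+// Blocks arriving from the network (processMsg) and blocks proposed locally
+// (consensusBAReceiver.ProposeBlock) both pass through here: the lattice
+// sanity-checks the block first, then the per-chain agreement module takes it
+// as a Byzantine Agreement candidate.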
+func (con *Consensus) preProcessBlock(b *types.Block) (err error) { + if err = con.lattice.SanityCheck(b, true); err != nil { + return + } + if err = con.baModules[b.Position.ChainID].processBlock(b); err != nil { + return err + } + return +} + +// processBlock is the entry point to submit one block to a Consensus instance. +func (con *Consensus) processBlock(block *types.Block) (err error) { + verifiedBlocks, deliveredBlocks, err := con.lattice.ProcessBlock(block) + if err != nil { + return + } + // Pass verified blocks (pass sanity check) back to BA module. + for _, b := range verifiedBlocks { + if err := + con.baModules[b.Position.ChainID].processBlock(b); err != nil { + return err + } + } + // Pass delivered blocks to compaction chain. + for _, b := range deliveredBlocks { + if err = con.ccModule.processBlock(b); err != nil { + return + } + go con.event.NotifyTime(b.Finalization.Timestamp) + } + deliveredBlocks = con.ccModule.extractBlocks() + for _, b := range deliveredBlocks { + if err = con.db.Put(*b); err != nil { + return + } + // TODO(mission): clone types.FinalizationResult + con.nbModule.BlockDelivered(b.Hash, b.Finalization) + } + if err = con.lattice.PurgeBlocks(deliveredBlocks); err != nil { + return + } + return +} + +// processFinalizedBlock is the entry point for syncing blocks. +func (con *Consensus) processFinalizedBlock(block *types.Block) (err error) { + if err = con.lattice.SanityCheck(block, false); err != nil { + return + } + con.ccModule.processFinalizedBlock(block) + for { + confirmed := con.ccModule.extractFinalizedBlocks() + if len(confirmed) == 0 { + break + } + if err = con.lattice.ctModule.processBlocks(confirmed); err != nil { + return + } + for _, b := range confirmed { + if err = con.db.Put(*b); err != nil { + if err != blockdb.ErrBlockExists { + return + } + err = nil + } + con.nbModule.BlockDelivered(b.Hash, b.Finalization) + if b.Position.Round+2 == con.roundToNotify { + // Only the first block delivered of that round would + // trigger this noitification. + con.gov.NotifyRoundHeight( + con.roundToNotify, b.Finalization.Height) + con.roundToNotify++ + } + } + } + return +} + +// PrepareBlock would setup header fields of block based on its ProposerID. +func (con *Consensus) prepareBlock(b *types.Block, + proposeTime time.Time) (err error) { + if err = con.lattice.PrepareBlock(b, proposeTime); err != nil { + return + } + // TODO(mission): decide CRS by block's round, which could be determined by + // block's info (ex. position, timestamp). + con.logger.Debug("Calling Governance.CRS", "round", 0) + if err = con.authModule.SignCRS(b, con.gov.CRS(0)); err != nil { + return + } + return +} + +// PrepareGenesisBlock would setup header fields for genesis block. +func (con *Consensus) PrepareGenesisBlock(b *types.Block, + proposeTime time.Time) (err error) { + if err = con.prepareBlock(b, proposeTime); err != nil { + return + } + if len(b.Payload) != 0 { + err = ErrGenesisBlockNotEmpty + return + } + return +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto.go new file mode 100644 index 000000000..f3870a5f6 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto.go @@ -0,0 +1,269 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. 
+// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "encoding/binary" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +func hashWitness(witness *types.Witness) (common.Hash, error) { + binaryTimestamp, err := witness.Timestamp.UTC().MarshalBinary() + if err != nil { + return common.Hash{}, err + } + binaryHeight := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryHeight, witness.Height) + return crypto.Keccak256Hash( + binaryHeight, + binaryTimestamp, + witness.Data), nil +} + +func hashBlock(block *types.Block) (common.Hash, error) { + hashPosition := hashPosition(block.Position) + // Handling Block.Acks. + binaryAcks := make([][]byte, len(block.Acks)) + for idx, ack := range block.Acks { + binaryAcks[idx] = ack[:] + } + hashAcks := crypto.Keccak256Hash(binaryAcks...) + binaryTimestamp, err := block.Timestamp.UTC().MarshalBinary() + if err != nil { + return common.Hash{}, err + } + binaryWitness, err := hashWitness(&block.Witness) + if err != nil { + return common.Hash{}, err + } + payloadHash := crypto.Keccak256Hash(block.Payload) + + hash := crypto.Keccak256Hash( + block.ProposerID.Hash[:], + block.ParentHash[:], + hashPosition[:], + hashAcks[:], + binaryTimestamp[:], + payloadHash[:], + binaryWitness[:]) + return hash, nil +} + +func verifyBlockSignature(pubkey crypto.PublicKey, + block *types.Block, sig crypto.Signature) (bool, error) { + hash, err := hashBlock(block) + if err != nil { + return false, err + } + return pubkey.VerifySignature(hash, sig), nil +} + +func hashVote(vote *types.Vote) common.Hash { + binaryPeriod := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryPeriod, vote.Period) + + hashPosition := hashPosition(vote.Position) + + hash := crypto.Keccak256Hash( + vote.ProposerID.Hash[:], + vote.BlockHash[:], + binaryPeriod, + hashPosition[:], + []byte{byte(vote.Type)}, + ) + return hash +} + +func verifyVoteSignature(vote *types.Vote) (bool, error) { + hash := hashVote(vote) + pubKey, err := crypto.SigToPub(hash, vote.Signature) + if err != nil { + return false, err + } + if vote.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +func hashCRS(block *types.Block, crs common.Hash) common.Hash { + hashPos := hashPosition(block.Position) + return crypto.Keccak256Hash(crs[:], hashPos[:]) +} + +func verifyCRSSignature(block *types.Block, crs common.Hash) ( + bool, error) { + hash := hashCRS(block, crs) + pubKey, err := crypto.SigToPub(hash, block.CRSSignature) + if err != nil { + return false, err + } + if block.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +func hashPosition(position types.Position) common.Hash { + binaryChainID := 
make([]byte, 4) + binary.LittleEndian.PutUint32(binaryChainID, position.ChainID) + + binaryHeight := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryHeight, position.Height) + + return crypto.Keccak256Hash( + binaryChainID, + binaryHeight, + ) +} + +func hashDKGPrivateShare(prvShare *types.DKGPrivateShare) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, prvShare.Round) + + return crypto.Keccak256Hash( + prvShare.ProposerID.Hash[:], + prvShare.ReceiverID.Hash[:], + binaryRound, + prvShare.PrivateShare.Bytes(), + ) +} + +func verifyDKGPrivateShareSignature( + prvShare *types.DKGPrivateShare) (bool, error) { + hash := hashDKGPrivateShare(prvShare) + pubKey, err := crypto.SigToPub(hash, prvShare.Signature) + if err != nil { + return false, err + } + if prvShare.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +func hashDKGMasterPublicKey(mpk *types.DKGMasterPublicKey) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, mpk.Round) + + return crypto.Keccak256Hash( + mpk.ProposerID.Hash[:], + mpk.DKGID.GetLittleEndian(), + mpk.PublicKeyShares.MasterKeyBytes(), + binaryRound, + ) +} + +// VerifyDKGMasterPublicKeySignature verifies DKGMasterPublicKey signature. +func VerifyDKGMasterPublicKeySignature( + mpk *types.DKGMasterPublicKey) (bool, error) { + hash := hashDKGMasterPublicKey(mpk) + pubKey, err := crypto.SigToPub(hash, mpk.Signature) + if err != nil { + return false, err + } + if mpk.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +func hashDKGComplaint(complaint *types.DKGComplaint) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, complaint.Round) + + hashPrvShare := hashDKGPrivateShare(&complaint.PrivateShare) + + return crypto.Keccak256Hash( + complaint.ProposerID.Hash[:], + binaryRound, + hashPrvShare[:], + ) +} + +// VerifyDKGComplaintSignature verifies DKGCompliant signature. +func VerifyDKGComplaintSignature( + complaint *types.DKGComplaint) (bool, error) { + if complaint.Round != complaint.PrivateShare.Round { + return false, nil + } + hash := hashDKGComplaint(complaint) + pubKey, err := crypto.SigToPub(hash, complaint.Signature) + if err != nil { + return false, err + } + if complaint.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + if !complaint.IsNack() { + return verifyDKGPrivateShareSignature(&complaint.PrivateShare) + } + return true, nil +} + +func hashDKGPartialSignature(psig *types.DKGPartialSignature) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, psig.Round) + + return crypto.Keccak256Hash( + psig.ProposerID.Hash[:], + binaryRound, + psig.Hash[:], + psig.PartialSignature.Signature[:], + ) +} + +func verifyDKGPartialSignatureSignature( + psig *types.DKGPartialSignature) (bool, error) { + hash := hashDKGPartialSignature(psig) + pubKey, err := crypto.SigToPub(hash, psig.Signature) + if err != nil { + return false, err + } + if psig.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +func hashDKGFinalize(final *types.DKGFinalize) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, final.Round) + + return crypto.Keccak256Hash( + final.ProposerID.Hash[:], + binaryRound, + ) +} + +// VerifyDKGFinalizeSignature verifies DKGFinalize signature. 
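+//
+// All Verify* helpers in this file share the same shape: recompute the hash,
+// recover the signer with crypto.SigToPub, and compare the derived NodeID
+// against the ProposerID. An illustrative sketch (not part of the upstream
+// file; it assumes types.DKGFinalize exposes the ProposerID, Round and
+// Signature fields referenced above):
+//
+//	final := &types.DKGFinalize{ProposerID: nID, Round: round}
+//	sig, _ := prvKey.Sign(hashDKGFinalize(final))
+//	final.Signature = sig
+//	ok, err := VerifyDKGFinalizeSignature(final) // ok is true only if nID matches prvKey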
+func VerifyDKGFinalizeSignature( + final *types.DKGFinalize) (bool, error) { + hash := hashDKGFinalize(final) + pubKey, err := crypto.SigToPub(hash, final.Signature) + if err != nil { + return false, err + } + if final.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/constant.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/constant.go new file mode 100644 index 000000000..91453912f --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/constant.go @@ -0,0 +1,26 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package dkg + +import ( + "github.com/dexon-foundation/bls/ffi/go/bls" +) + +const ( + curve = bls.CurveFp382_2 +) diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/dkg.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/dkg.go new file mode 100644 index 000000000..fca3fcd96 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/dkg.go @@ -0,0 +1,560 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package dkg + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/dexon-foundation/bls/ffi/go/bls" + "github.com/dexon-foundation/dexon/rlp" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" +) + +var ( + // ErrDuplicatedShare is reported when adding an private key share of same id. + ErrDuplicatedShare = fmt.Errorf("invalid share") + // ErrNoIDToRecover is reported when no id is provided for recovering private + // key. + ErrNoIDToRecover = fmt.Errorf("no id to recover private key") + // ErrShareNotFound is reported when the private key share of id is not found + // when recovering private key. 
+ ErrShareNotFound = fmt.Errorf("share not found") +) + +const cryptoType = "bls" + +var publicKeyLength int + +func init() { + bls.Init(curve) + + pubKey := &bls.PublicKey{} + publicKeyLength = len(pubKey.Serialize()) +} + +// PrivateKey represents a private key structure implments +// Crypto.PrivateKey interface. +type PrivateKey struct { + privateKey bls.SecretKey + publicKey PublicKey +} + +// EncodeRLP implements rlp.Encoder +func (prv *PrivateKey) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, prv.Bytes()) +} + +// DecodeRLP implements rlp.Decoder +func (prv *PrivateKey) DecodeRLP(s *rlp.Stream) error { + var b []byte + if err := s.Decode(&b); err != nil { + return err + } + return prv.SetBytes(b) +} + +// MarshalJSON implements json.Marshaller. +func (prv *PrivateKey) MarshalJSON() ([]byte, error) { + return json.Marshal(&prv.privateKey) +} + +// UnmarshalJSON implements json.Unmarshaller. +func (prv *PrivateKey) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &prv.privateKey) +} + +// ID is the id for DKG protocol. +type ID = bls.ID + +// IDs is an array of ID. +type IDs []ID + +// PublicKey represents a public key structure implements +// Crypto.PublicKey interface. +type PublicKey struct { + publicKey bls.PublicKey +} + +// PrivateKeyShares represents a private key shares for DKG protocol. +type PrivateKeyShares struct { + shares []PrivateKey + shareIndex map[ID]int + masterPrivateKey []bls.SecretKey +} + +// Equal check equality between two PrivateKeyShares instances. +func (prvs *PrivateKeyShares) Equal(other *PrivateKeyShares) bool { + // Check shares. + if len(prvs.shareIndex) != len(other.shareIndex) { + return false + } + for dID, idx := range prvs.shareIndex { + otherIdx, exists := other.shareIndex[dID] + if !exists { + return false + } + if !prvs.shares[idx].privateKey.IsEqual( + &other.shares[otherIdx].privateKey) { + return false + } + } + // Check master private keys. + if len(prvs.masterPrivateKey) != len(other.masterPrivateKey) { + return false + } + for idx, m := range prvs.masterPrivateKey { + if m.GetHexString() != other.masterPrivateKey[idx].GetHexString() { + return false + } + } + return true +} + +// PublicKeyShares represents a public key shares for DKG protocol. +type PublicKeyShares struct { + shareCaches []PublicKey + shareCacheIndex map[ID]int + masterPublicKey []bls.PublicKey +} + +type rlpPublicKeyShares struct { + ShareCaches [][]byte + ShareCacheIndexK [][]byte + ShareCacheIndexV []uint32 + MasterPublicKey [][]byte +} + +// Equal checks equality of two PublicKeyShares instance. +func (pubs *PublicKeyShares) Equal(other *PublicKeyShares) bool { + // Check shares. + for dID, idx := range pubs.shareCacheIndex { + otherIdx, exists := other.shareCacheIndex[dID] + if !exists { + continue + } + if !pubs.shareCaches[idx].publicKey.IsEqual( + &other.shareCaches[otherIdx].publicKey) { + return false + } + } + // Check master public keys. 
+ if len(pubs.masterPublicKey) != len(other.masterPublicKey) { + return false + } + for idx, m := range pubs.masterPublicKey { + if m.GetHexString() != other.masterPublicKey[idx].GetHexString() { + return false + } + } + return true +} + +// EncodeRLP implements rlp.Encoder +func (pubs *PublicKeyShares) EncodeRLP(w io.Writer) error { + var rps rlpPublicKeyShares + for _, share := range pubs.shareCaches { + rps.ShareCaches = append(rps.ShareCaches, share.Serialize()) + } + + for id, v := range pubs.shareCacheIndex { + rps.ShareCacheIndexK = append( + rps.ShareCacheIndexK, id.GetLittleEndian()) + rps.ShareCacheIndexV = append(rps.ShareCacheIndexV, uint32(v)) + } + + for _, m := range pubs.masterPublicKey { + rps.MasterPublicKey = append(rps.MasterPublicKey, m.Serialize()) + } + + return rlp.Encode(w, rps) +} + +// DecodeRLP implements rlp.Decoder +func (pubs *PublicKeyShares) DecodeRLP(s *rlp.Stream) error { + var dec rlpPublicKeyShares + if err := s.Decode(&dec); err != nil { + return err + } + + if len(dec.ShareCacheIndexK) != len(dec.ShareCacheIndexV) { + return fmt.Errorf("invalid shareIndex") + } + + ps := NewEmptyPublicKeyShares() + for _, share := range dec.ShareCaches { + var publicKey PublicKey + if err := publicKey.Deserialize(share); err != nil { + return err + } + ps.shareCaches = append(ps.shareCaches, publicKey) + } + + for i, k := range dec.ShareCacheIndexK { + id, err := BytesID(k) + if err != nil { + return err + } + ps.shareCacheIndex[id] = int(dec.ShareCacheIndexV[i]) + } + + for _, k := range dec.MasterPublicKey { + var key bls.PublicKey + if err := key.Deserialize(k); err != nil { + return err + } + ps.masterPublicKey = append(ps.masterPublicKey, key) + } + + *pubs = *ps + return nil +} + +// MarshalJSON implements json.Marshaller. +func (pubs *PublicKeyShares) MarshalJSON() ([]byte, error) { + type Alias PublicKeyShares + data := &struct { + MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"` + }{ + make([]*bls.PublicKey, len(pubs.masterPublicKey)), + } + for i := range pubs.masterPublicKey { + data.MasterPublicKeys[i] = &pubs.masterPublicKey[i] + } + return json.Marshal(data) +} + +// UnmarshalJSON implements json.Unmarshaller. +func (pubs *PublicKeyShares) UnmarshalJSON(data []byte) error { + type Alias PublicKeyShares + aux := &struct { + MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"` + }{} + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + mpk := make([]bls.PublicKey, len(aux.MasterPublicKeys)) + for i, pk := range aux.MasterPublicKeys { + mpk[i] = *pk + } + pubs.masterPublicKey = mpk + return nil +} + +// Clone clones every fields of PublicKeyShares. This method is mainly +// for testing purpose thus would panic when error. +func (pubs *PublicKeyShares) Clone() *PublicKeyShares { + b, err := rlp.EncodeToBytes(pubs) + if err != nil { + panic(err) + } + pubsCopy := NewEmptyPublicKeyShares() + if err := rlp.DecodeBytes(b, pubsCopy); err != nil { + panic(err) + } + return pubsCopy +} + +// NewID creates a ew ID structure. +func NewID(id []byte) ID { + var blsID bls.ID + blsID.SetLittleEndian(id) + return blsID +} + +// BytesID creates a new ID structure, +// It returns err if the byte slice is not valid. +func BytesID(id []byte) (ID, error) { + var blsID bls.ID + err := blsID.SetLittleEndian(id) + return blsID, err +} + +// NewPrivateKey creates a new PrivateKey structure. 
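+//
+// Illustrative sketch (not part of the upstream file): a fresh key can sign a
+// hash and the matching public key verifies it.
+//
+//	prv := NewPrivateKey()
+//	sig, err := prv.Sign(someHash) // someHash is any common.Hash
+//	if err == nil {
+//		ok := prv.PublicKey().VerifySignature(someHash, sig)
+//		_ = ok // true for an untampered hash/signature pair
+//	}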
+func NewPrivateKey() *PrivateKey { + var key bls.SecretKey + key.SetByCSPRNG() + return &PrivateKey{ + privateKey: key, + publicKey: *newPublicKey(&key), + } +} + +// NewPrivateKeyShares creates a DKG private key shares of threshold t. +func NewPrivateKeyShares(t int) (*PrivateKeyShares, *PublicKeyShares) { + var prv bls.SecretKey + prv.SetByCSPRNG() + msk := prv.GetMasterSecretKey(t) + mpk := bls.GetMasterPublicKey(msk) + return &PrivateKeyShares{ + masterPrivateKey: msk, + shareIndex: make(map[ID]int), + }, &PublicKeyShares{ + shareCacheIndex: make(map[ID]int), + masterPublicKey: mpk, + } +} + +// NewEmptyPrivateKeyShares creates an empty private key shares. +func NewEmptyPrivateKeyShares() *PrivateKeyShares { + return &PrivateKeyShares{ + shareIndex: make(map[ID]int), + } +} + +// SetParticipants sets the DKG participants. +func (prvs *PrivateKeyShares) SetParticipants(IDs IDs) { + prvs.shares = make([]PrivateKey, len(IDs)) + prvs.shareIndex = make(map[ID]int, len(IDs)) + for idx, ID := range IDs { + prvs.shares[idx].privateKey.Set(prvs.masterPrivateKey, &ID) + prvs.shareIndex[ID] = idx + } +} + +// AddShare adds a share. +func (prvs *PrivateKeyShares) AddShare(ID ID, share *PrivateKey) error { + if idx, exist := prvs.shareIndex[ID]; exist { + if !share.privateKey.IsEqual(&prvs.shares[idx].privateKey) { + return ErrDuplicatedShare + } + return nil + } + prvs.shareIndex[ID] = len(prvs.shares) + prvs.shares = append(prvs.shares, *share) + return nil +} + +// RecoverPrivateKey recovers private key from the shares. +func (prvs *PrivateKeyShares) RecoverPrivateKey(qualifyIDs IDs) ( + *PrivateKey, error) { + var prv PrivateKey + if len(qualifyIDs) == 0 { + return nil, ErrNoIDToRecover + } + for i, ID := range qualifyIDs { + idx, exist := prvs.shareIndex[ID] + if !exist { + return nil, ErrShareNotFound + } + if i == 0 { + prv.privateKey = prvs.shares[idx].privateKey + continue + } + prv.privateKey.Add(&prvs.shares[idx].privateKey) + } + return &prv, nil +} + +// RecoverPublicKey recovers public key from the shares. +func (prvs *PrivateKeyShares) RecoverPublicKey(qualifyIDs IDs) ( + *PublicKey, error) { + var pub PublicKey + if len(qualifyIDs) == 0 { + return nil, ErrNoIDToRecover + } + for i, ID := range qualifyIDs { + idx, exist := prvs.shareIndex[ID] + if !exist { + return nil, ErrShareNotFound + } + if i == 0 { + pub.publicKey = *prvs.shares[idx].privateKey.GetPublicKey() + continue + } + pub.publicKey.Add(prvs.shares[idx].privateKey.GetPublicKey()) + } + return &pub, nil +} + +// Share returns the share for the ID. +func (prvs *PrivateKeyShares) Share(ID ID) (*PrivateKey, bool) { + idx, exist := prvs.shareIndex[ID] + if !exist { + return nil, false + } + return &prvs.shares[idx], true +} + +// NewEmptyPublicKeyShares creates an empty public key shares. +func NewEmptyPublicKeyShares() *PublicKeyShares { + return &PublicKeyShares{ + shareCacheIndex: make(map[ID]int), + } +} + +// Share returns the share for the ID. +func (pubs *PublicKeyShares) Share(ID ID) (*PublicKey, error) { + idx, exist := pubs.shareCacheIndex[ID] + if exist { + return &pubs.shareCaches[idx], nil + } + var pk PublicKey + if err := pk.publicKey.Set(pubs.masterPublicKey, &ID); err != nil { + return nil, err + } + pubs.AddShare(ID, &pk) + return &pk, nil +} + +// AddShare adds a share. 
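+//
+// Illustrative sketch of dealing threshold shares (not part of the upstream
+// file; ids is a dkg.IDs built with NewID for every participant):
+//
+//	prvShares, pubShares := NewPrivateKeyShares(threshold)
+//	prvShares.SetParticipants(ids)
+//	for _, id := range ids {
+//		share, _ := prvShares.Share(id)
+//		ok, _ := pubShares.VerifyPrvShare(id, share) // receiver-side check
+//		_ = ok
+//	}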
+func (pubs *PublicKeyShares) AddShare(ID ID, share *PublicKey) error { + if idx, exist := pubs.shareCacheIndex[ID]; exist { + if !share.publicKey.IsEqual(&pubs.shareCaches[idx].publicKey) { + return ErrDuplicatedShare + } + return nil + } + pubs.shareCacheIndex[ID] = len(pubs.shareCaches) + pubs.shareCaches = append(pubs.shareCaches, *share) + return nil +} + +// VerifyPrvShare verifies if the private key shares is valid. +func (pubs *PublicKeyShares) VerifyPrvShare(ID ID, share *PrivateKey) ( + bool, error) { + var pk bls.PublicKey + if err := pk.Set(pubs.masterPublicKey, &ID); err != nil { + return false, err + } + return pk.IsEqual(share.privateKey.GetPublicKey()), nil +} + +// VerifyPubShare verifies if the public key shares is valid. +func (pubs *PublicKeyShares) VerifyPubShare(ID ID, share *PublicKey) ( + bool, error) { + var pk bls.PublicKey + if err := pk.Set(pubs.masterPublicKey, &ID); err != nil { + return false, err + } + return pk.IsEqual(&share.publicKey), nil +} + +// RecoverPublicKey recovers private key from the shares. +func (pubs *PublicKeyShares) RecoverPublicKey(qualifyIDs IDs) ( + *PublicKey, error) { + var pub PublicKey + if len(qualifyIDs) == 0 { + return nil, ErrNoIDToRecover + } + for i, ID := range qualifyIDs { + pk, err := pubs.Share(ID) + if err != nil { + return nil, err + } + if i == 0 { + pub.publicKey = pk.publicKey + continue + } + pub.publicKey.Add(&pk.publicKey) + } + return &pub, nil +} + +// MasterKeyBytes returns []byte representation of master public key. +func (pubs *PublicKeyShares) MasterKeyBytes() []byte { + bytes := make([]byte, 0, len(pubs.masterPublicKey)*publicKeyLength) + for _, pk := range pubs.masterPublicKey { + bytes = append(bytes, pk.Serialize()...) + } + return bytes +} + +// newPublicKey creates a new PublicKey structure. +func newPublicKey(prvKey *bls.SecretKey) *PublicKey { + return &PublicKey{ + publicKey: *prvKey.GetPublicKey(), + } +} + +// newPublicKeyFromBytes create a new PublicKey structure +// from bytes representation of bls.PublicKey +func newPublicKeyFromBytes(b []byte) (*PublicKey, error) { + var pub PublicKey + err := pub.publicKey.Deserialize(b) + return &pub, err +} + +// PublicKey returns the public key associate this private key. +func (prv *PrivateKey) PublicKey() crypto.PublicKey { + return prv.publicKey +} + +// Sign calculates a signature. +func (prv *PrivateKey) Sign(hash common.Hash) (crypto.Signature, error) { + msg := string(hash[:]) + sign := prv.privateKey.Sign(msg) + return crypto.Signature{ + Type: cryptoType, + Signature: sign.Serialize(), + }, nil +} + +// Bytes returns []byte representation of private key. +func (prv *PrivateKey) Bytes() []byte { + return prv.privateKey.GetLittleEndian() +} + +// SetBytes sets the private key data to []byte. +func (prv *PrivateKey) SetBytes(bytes []byte) error { + var key bls.SecretKey + if err := key.SetLittleEndian(bytes); err != nil { + return err + } + prv.privateKey = key + prv.publicKey = *newPublicKey(&prv.privateKey) + return nil +} + +// String returns string representation of privat key. +func (prv *PrivateKey) String() string { + return prv.privateKey.GetHexString() +} + +// VerifySignature checks that the given public key created signature over hash. 
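+//
+// In the DKG setting the key pair is usually the one recovered from shares.
+// Illustrative sketch (not part of the upstream file; it assumes prvShares
+// holds the private shares this node received from every dealer listed in
+// qualifyIDs):
+//
+//	recPrv, _ := prvShares.RecoverPrivateKey(qualifyIDs)
+//	recPub, _ := prvShares.RecoverPublicKey(qualifyIDs)
+//	sig, _ := recPrv.Sign(someHash)
+//	ok := recPub.VerifySignature(someHash, sig) // true: both are sums over the same shares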
+func (pub PublicKey) VerifySignature( + hash common.Hash, signature crypto.Signature) bool { + if len(signature.Signature) == 0 { + return false + } + var sig bls.Sign + if err := sig.Deserialize(signature.Signature[:]); err != nil { + fmt.Println(err) + return false + } + msg := string(hash[:]) + return sig.Verify(&pub.publicKey, msg) +} + +// Bytes returns []byte representation of public key. +func (pub PublicKey) Bytes() []byte { + return pub.publicKey.Serialize() +} + +// Serialize return bytes representation of public key. +func (pub *PublicKey) Serialize() []byte { + return pub.publicKey.Serialize() +} + +// Deserialize parses bytes representation of public key. +func (pub *PublicKey) Deserialize(b []byte) error { + return pub.publicKey.Deserialize(b) +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/utils.go new file mode 100644 index 000000000..f8d11b3c9 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/utils.go @@ -0,0 +1,71 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package dkg + +import ( + "fmt" + + "github.com/dexon-foundation/bls/ffi/go/bls" + + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" +) + +// PartialSignature is a partial signature in DKG+TSIG protocol. +type PartialSignature crypto.Signature + +var ( + // ErrEmptySignature is reported if the signature is empty. + ErrEmptySignature = fmt.Errorf("invalid empty signature") +) + +// RecoverSignature recovers TSIG signature. +func RecoverSignature(sigs []PartialSignature, signerIDs IDs) ( + crypto.Signature, error) { + blsSigs := make([]bls.Sign, len(sigs)) + for i, sig := range sigs { + if len(sig.Signature) == 0 { + return crypto.Signature{}, ErrEmptySignature + } + if err := blsSigs[i].Deserialize([]byte(sig.Signature)); err != nil { + return crypto.Signature{}, err + } + } + var recoverSig bls.Sign + if err := recoverSig.Recover(blsSigs, []bls.ID(signerIDs)); err != nil { + return crypto.Signature{}, err + } + return crypto.Signature{ + Type: cryptoType, + Signature: recoverSig.Serialize()}, nil +} + +// RecoverGroupPublicKey recovers group public key. 
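+//
+// Together with RecoverSignature this gives the TSIG verification path. An
+// illustrative sketch (not part of the upstream file; partialSigs and
+// signerIDs come from at least `threshold` distinct signers, and mpks holds
+// every qualified dealer's PublicKeyShares):
+//
+//	groupSig, err := RecoverSignature(partialSigs, signerIDs)
+//	if err == nil {
+//		groupPK := RecoverGroupPublicKey(mpks)
+//		ok := groupPK.VerifySignature(someHash, groupSig)
+//		_ = ok
+//	}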
+func RecoverGroupPublicKey(pubShares []*PublicKeyShares) *PublicKey { + var pub *PublicKey + for _, pubShare := range pubShares { + pk0 := pubShare.masterPublicKey[0] + if pub == nil { + pub = &PublicKey{ + publicKey: pk0, + } + } else { + pub.publicKey.Add(&pk0) + } + } + return pub +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa/ecdsa.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa/ecdsa.go new file mode 100644 index 000000000..49d5b28a2 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa/ecdsa.go @@ -0,0 +1,133 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package ecdsa + +import ( + "crypto/ecdsa" + + dexCrypto "github.com/dexon-foundation/dexon/crypto" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" +) + +const cryptoType = "ecdsa" + +func init() { + crypto.RegisterSigToPub(cryptoType, SigToPub) +} + +// PrivateKey represents a private key structure used in geth and implments +// Crypto.PrivateKey interface. +type PrivateKey struct { + privateKey *ecdsa.PrivateKey +} + +// PublicKey represents a public key structure used in geth and implements +// Crypto.PublicKey interface. +type PublicKey struct { + publicKey *ecdsa.PublicKey +} + +// NewPrivateKey creates a new PrivateKey structure. +func NewPrivateKey() (*PrivateKey, error) { + key, err := dexCrypto.GenerateKey() + if err != nil { + return nil, err + } + return &PrivateKey{privateKey: key}, nil +} + +// NewPrivateKeyFromECDSA creates a new PrivateKey structure from +// ecdsa.PrivateKey. +func NewPrivateKeyFromECDSA(key *ecdsa.PrivateKey) *PrivateKey { + return &PrivateKey{privateKey: key} +} + +// NewPublicKeyFromECDSA creates a new PublicKey structure from +// ecdsa.PublicKey. +func NewPublicKeyFromECDSA(key *ecdsa.PublicKey) *PublicKey { + return &PublicKey{publicKey: key} +} + +// NewPublicKeyFromByteSlice constructs an eth.publicKey instance from +// a byte slice. +func NewPublicKeyFromByteSlice(b []byte) (crypto.PublicKey, error) { + pub, err := dexCrypto.UnmarshalPubkey(b) + if err != nil { + return &PublicKey{}, err + } + return &PublicKey{publicKey: pub}, nil +} + +// PublicKey returns the public key associate this private key. +func (prv *PrivateKey) PublicKey() crypto.PublicKey { + return NewPublicKeyFromECDSA(&(prv.privateKey.PublicKey)) +} + +// Sign calculates an ECDSA signature. +// +// This function is susceptible to chosen plaintext attacks that can leak +// information about the private key that is used for signing. Callers must +// be aware that the given hash cannot be chosen by an adversery. 
Common +// solution is to hash any input before calculating the signature. +// +// The produced signature is in the [R || S || V] format where V is 0 or 1. +func (prv *PrivateKey) Sign(hash common.Hash) ( + sig crypto.Signature, err error) { + s, err := dexCrypto.Sign(hash[:], prv.privateKey) + sig = crypto.Signature{ + Type: cryptoType, + Signature: s, + } + return +} + +// VerifySignature checks that the given public key created signature over hash. +// The public key should be in compressed (33 bytes) or uncompressed (65 bytes) +// format. +// The signature should have the 64 byte [R || S] format. +func (pub *PublicKey) VerifySignature( + hash common.Hash, signature crypto.Signature) bool { + sig := signature.Signature + if len(sig) == 65 { + // The last byte is for ecrecover. + sig = sig[:64] + } + return dexCrypto.VerifySignature(pub.Bytes(), hash[:], sig) +} + +// Compress encodes a public key to the 33-byte compressed format. +func (pub *PublicKey) Compress() []byte { + return dexCrypto.CompressPubkey(pub.publicKey) +} + +// Bytes returns the []byte representation of uncompressed public key. (65 bytes) +func (pub *PublicKey) Bytes() []byte { + return dexCrypto.FromECDSAPub(pub.publicKey) +} + +// SigToPub returns the PublicKey that created the given signature. +func SigToPub( + hash common.Hash, signature crypto.Signature) (crypto.PublicKey, error) { + key, err := dexCrypto.SigToPub(hash[:], signature.Signature[:]) + if err != nil { + return &PublicKey{}, err + } + return &PublicKey{publicKey: key}, nil +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/interfaces.go new file mode 100644 index 000000000..fb1dcbe80 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/interfaces.go @@ -0,0 +1,48 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package crypto + +import ( + "github.com/dexon-foundation/dexon-consensus-core/common" +) + +// Signature is the basic signature type in DEXON. +type Signature struct { + Type string + Signature []byte +} + +// PrivateKey describes the asymmetric cryptography interface that interacts +// with the private key. +type PrivateKey interface { + // PublicKey returns the public key associate this private key. + PublicKey() PublicKey + + // Sign calculates a signature. + Sign(hash common.Hash) (Signature, error) +} + +// PublicKey describes the asymmetric cryptography interface that interacts +// with the public key. +type PublicKey interface { + // VerifySignature checks that the given public key created signature over hash. 
+	VerifySignature(hash common.Hash, signature Signature) bool
+
+	// Bytes returns the []byte representation of public key.
+	Bytes() []byte
+}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/utils.go
new file mode 100644
index 000000000..d56c28a41
--- /dev/null
+++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/utils.go
@@ -0,0 +1,80 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package crypto
+
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/dexon-foundation/dexon/crypto"
+
+	"github.com/dexon-foundation/dexon-consensus-core/common"
+)
+
+var (
+	// ErrSigToPubTypeNotFound is reported if the type is not registered.
+	ErrSigToPubTypeNotFound = fmt.Errorf("type of sigToPub is not found")
+
+	// ErrSigToPubTypeAlreadyExist is reported if the type is already used.
+	ErrSigToPubTypeAlreadyExist = fmt.Errorf("type of sigToPub already exists")
+)
+
+// SigToPubFn is a function to recover public key from signature.
+type SigToPubFn func(hash common.Hash, signature Signature) (PublicKey, error)
+
+var sigToPubCB map[string]SigToPubFn
+
+func init() {
+	sigToPubCB = make(map[string]SigToPubFn)
+}
+
+// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
+// converting it to an internal Hash data structure.
+func Keccak256Hash(data ...[]byte) (h common.Hash) {
+	return common.Hash(crypto.Keccak256Hash(data...))
+}
+
+// Clone returns a deep copy of a signature.
+func (sig Signature) Clone() Signature {
+	return Signature{
+		Type:      sig.Type,
+		Signature: sig.Signature[:],
+	}
+}
+
+func (sig Signature) String() string {
+	return hex.EncodeToString([]byte(sig.Signature[:]))
+}
+
+// RegisterSigToPub registers a sigToPub function for the given type.
+func RegisterSigToPub(sigType string, sigToPub SigToPubFn) error {
+	if _, exist := sigToPubCB[sigType]; exist {
+		return ErrSigToPubTypeAlreadyExist
+	}
+	sigToPubCB[sigType] = sigToPub
+	return nil
+}
+
+// SigToPub recovers public key from signature.
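+//
+// Backends register themselves by signature type: importing the ecdsa
+// sub-package, for example, calls RegisterSigToPub("ecdsa", ...) from its
+// init, after which SigToPub dispatches on Signature.Type. Illustrative
+// caller-side sketch (not part of the upstream file):
+//
+//	// import _ "github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa"
+//	pub, err := crypto.SigToPub(hash, sig) // sig.Type == "ecdsa"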
+func SigToPub(hash common.Hash, signature Signature) (PublicKey, error) { + sigToPub, exist := sigToPubCB[signature.Type] + if !exist { + return nil, ErrSigToPubTypeNotFound + } + return sigToPub(hash, signature) +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/dkg-tsig-protocol.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/dkg-tsig-protocol.go new file mode 100644 index 000000000..bb4193193 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/dkg-tsig-protocol.go @@ -0,0 +1,560 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "sync" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Errors for dkg module. +var ( + ErrNotDKGParticipant = fmt.Errorf( + "not a DKG participant") + ErrNotQualifyDKGParticipant = fmt.Errorf( + "not a qualified DKG participant") + ErrIDShareNotFound = fmt.Errorf( + "private share not found for specific ID") + ErrNotReachThreshold = fmt.Errorf( + "threshold not reach") + ErrInvalidThreshold = fmt.Errorf( + "invalid threshold") + ErrIncorrectPrivateShareSignature = fmt.Errorf( + "incorrect private share signature") + ErrMismatchPartialSignatureHash = fmt.Errorf( + "mismatch partialSignature hash") + ErrIncorrectPartialSignatureSignature = fmt.Errorf( + "incorrect partialSignature signature") + ErrIncorrectPartialSignature = fmt.Errorf( + "incorrect partialSignature") + ErrNotEnoughtPartialSignatures = fmt.Errorf( + "not enough of partial signatures") + ErrRoundAlreadyPurged = fmt.Errorf( + "cache of round already been purged") +) + +type dkgReceiver interface { + // ProposeDKGComplaint proposes a DKGComplaint. + ProposeDKGComplaint(complaint *types.DKGComplaint) + + // ProposeDKGMasterPublicKey propose a DKGMasterPublicKey. + ProposeDKGMasterPublicKey(mpk *types.DKGMasterPublicKey) + + // ProposeDKGPrivateShare propose a DKGPrivateShare. + ProposeDKGPrivateShare(prv *types.DKGPrivateShare) + + // ProposeDKGAntiNackComplaint propose a DKGPrivateShare as an anti complaint. + ProposeDKGAntiNackComplaint(prv *types.DKGPrivateShare) + + // ProposeDKGFinalize propose a DKGFinalize message. 
+ ProposeDKGFinalize(final *types.DKGFinalize) +} + +type dkgProtocol struct { + ID types.NodeID + recv dkgReceiver + round uint64 + threshold int + idMap map[types.NodeID]dkg.ID + mpkMap map[types.NodeID]*dkg.PublicKeyShares + masterPrivateShare *dkg.PrivateKeyShares + prvShares *dkg.PrivateKeyShares + prvSharesReceived map[types.NodeID]struct{} + nodeComplained map[types.NodeID]struct{} + // Complaint[from][to]'s anti is saved to antiComplaint[from][to]. + antiComplaintReceived map[types.NodeID]map[types.NodeID]struct{} +} + +type dkgShareSecret struct { + privateKey *dkg.PrivateKey +} + +// DKGGroupPublicKey is the result of DKG protocol. +type DKGGroupPublicKey struct { + round uint64 + qualifyIDs dkg.IDs + qualifyNodeIDs map[types.NodeID]struct{} + idMap map[types.NodeID]dkg.ID + publicKeys map[types.NodeID]*dkg.PublicKey + groupPublicKey *dkg.PublicKey + threshold int +} + +// TSigVerifier is the interface verifying threshold signature. +type TSigVerifier interface { + VerifySignature(hash common.Hash, sig crypto.Signature) bool +} + +// TSigVerifierCache is the cache for TSigVerifier. +type TSigVerifierCache struct { + gov Governance + verifier map[uint64]TSigVerifier + minRound uint64 + cacheSize int + lock sync.RWMutex +} + +type tsigProtocol struct { + groupPublicKey *DKGGroupPublicKey + hash common.Hash + sigs map[dkg.ID]dkg.PartialSignature + threshold int +} + +func newDKGID(ID types.NodeID) dkg.ID { + return dkg.NewID(ID.Hash[:]) +} + +func newDKGProtocol( + ID types.NodeID, + recv dkgReceiver, + round uint64, + threshold int) *dkgProtocol { + + prvShare, pubShare := dkg.NewPrivateKeyShares(threshold) + + recv.ProposeDKGMasterPublicKey(&types.DKGMasterPublicKey{ + ProposerID: ID, + Round: round, + DKGID: newDKGID(ID), + PublicKeyShares: *pubShare, + }) + + return &dkgProtocol{ + ID: ID, + recv: recv, + round: round, + threshold: threshold, + idMap: make(map[types.NodeID]dkg.ID), + mpkMap: make(map[types.NodeID]*dkg.PublicKeyShares), + masterPrivateShare: prvShare, + prvShares: dkg.NewEmptyPrivateKeyShares(), + prvSharesReceived: make(map[types.NodeID]struct{}), + nodeComplained: make(map[types.NodeID]struct{}), + antiComplaintReceived: make(map[types.NodeID]map[types.NodeID]struct{}), + } +} + +func (d *dkgProtocol) processMasterPublicKeys( + mpks []*types.DKGMasterPublicKey) error { + d.idMap = make(map[types.NodeID]dkg.ID, len(mpks)) + d.mpkMap = make(map[types.NodeID]*dkg.PublicKeyShares, len(mpks)) + d.prvSharesReceived = make(map[types.NodeID]struct{}, len(mpks)) + ids := make(dkg.IDs, len(mpks)) + for i := range mpks { + nID := mpks[i].ProposerID + d.idMap[nID] = mpks[i].DKGID + d.mpkMap[nID] = &mpks[i].PublicKeyShares + ids[i] = mpks[i].DKGID + } + d.masterPrivateShare.SetParticipants(ids) + for _, mpk := range mpks { + share, ok := d.masterPrivateShare.Share(mpk.DKGID) + if !ok { + return ErrIDShareNotFound + } + d.recv.ProposeDKGPrivateShare(&types.DKGPrivateShare{ + ProposerID: d.ID, + ReceiverID: mpk.ProposerID, + Round: d.round, + PrivateShare: *share, + }) + } + return nil +} + +func (d *dkgProtocol) proposeNackComplaints() { + for nID := range d.mpkMap { + if _, exist := d.prvSharesReceived[nID]; exist { + continue + } + d.recv.ProposeDKGComplaint(&types.DKGComplaint{ + ProposerID: d.ID, + Round: d.round, + PrivateShare: types.DKGPrivateShare{ + ProposerID: nID, + Round: d.round, + }, + }) + } +} + +func (d *dkgProtocol) processNackComplaints(complaints []*types.DKGComplaint) ( + err error) { + for _, complaint := range complaints { + if !complaint.IsNack() { + 
continue + } + if complaint.PrivateShare.ProposerID != d.ID { + continue + } + id, exist := d.idMap[complaint.ProposerID] + if !exist { + err = ErrNotDKGParticipant + continue + } + share, ok := d.masterPrivateShare.Share(id) + if !ok { + err = ErrIDShareNotFound + continue + } + d.recv.ProposeDKGAntiNackComplaint(&types.DKGPrivateShare{ + ProposerID: d.ID, + ReceiverID: complaint.ProposerID, + Round: d.round, + PrivateShare: *share, + }) + } + return +} + +func (d *dkgProtocol) enforceNackComplaints(complaints []*types.DKGComplaint) { + for _, complaint := range complaints { + if !complaint.IsNack() { + continue + } + to := complaint.PrivateShare.ProposerID + // Do not propose nack complaint to itself. + if to == d.ID { + continue + } + from := complaint.ProposerID + // Nack complaint is already proposed. + if from == d.ID { + continue + } + if _, exist := + d.antiComplaintReceived[from][to]; !exist { + d.recv.ProposeDKGComplaint(&types.DKGComplaint{ + ProposerID: d.ID, + Round: d.round, + PrivateShare: types.DKGPrivateShare{ + ProposerID: to, + Round: d.round, + }, + }) + } + } +} + +func (d *dkgProtocol) sanityCheck(prvShare *types.DKGPrivateShare) error { + if _, exist := d.idMap[prvShare.ProposerID]; !exist { + return ErrNotDKGParticipant + } + ok, err := verifyDKGPrivateShareSignature(prvShare) + if err != nil { + return err + } + if !ok { + return ErrIncorrectPrivateShareSignature + } + return nil +} + +func (d *dkgProtocol) processPrivateShare( + prvShare *types.DKGPrivateShare) error { + if d.round != prvShare.Round { + return nil + } + receiverID, exist := d.idMap[prvShare.ReceiverID] + // This node is not a DKG participant, ignore the private share. + if !exist { + return nil + } + if err := d.sanityCheck(prvShare); err != nil { + return err + } + mpk := d.mpkMap[prvShare.ProposerID] + ok, err := mpk.VerifyPrvShare(receiverID, &prvShare.PrivateShare) + if err != nil { + return err + } + if prvShare.ReceiverID == d.ID { + d.prvSharesReceived[prvShare.ProposerID] = struct{}{} + } + if !ok { + if _, exist := d.nodeComplained[prvShare.ProposerID]; exist { + return nil + } + complaint := &types.DKGComplaint{ + ProposerID: d.ID, + Round: d.round, + PrivateShare: *prvShare, + } + d.nodeComplained[prvShare.ProposerID] = struct{}{} + d.recv.ProposeDKGComplaint(complaint) + } else if prvShare.ReceiverID == d.ID { + sender := d.idMap[prvShare.ProposerID] + if err := d.prvShares.AddShare(sender, &prvShare.PrivateShare); err != nil { + return err + } + } else { + // The prvShare is an anti complaint. + if _, exist := d.antiComplaintReceived[prvShare.ReceiverID]; !exist { + d.antiComplaintReceived[prvShare.ReceiverID] = + make(map[types.NodeID]struct{}) + d.recv.ProposeDKGAntiNackComplaint(prvShare) + } + d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID] = + struct{}{} + } + return nil +} + +func (d *dkgProtocol) proposeFinalize() { + d.recv.ProposeDKGFinalize(&types.DKGFinalize{ + ProposerID: d.ID, + Round: d.round, + }) +} + +func (d *dkgProtocol) recoverShareSecret(qualifyIDs dkg.IDs) ( + *dkgShareSecret, error) { + if len(qualifyIDs) < d.threshold { + return nil, ErrNotReachThreshold + } + prvKey, err := d.prvShares.RecoverPrivateKey(qualifyIDs) + if err != nil { + return nil, err + } + return &dkgShareSecret{ + privateKey: prvKey, + }, nil +} + +func (ss *dkgShareSecret) sign(hash common.Hash) dkg.PartialSignature { + // DKG sign will always success. 
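newDKGProtocol immediately calls back into its dkgReceiver to publish the master public key, so even a local test needs a receiver implementation first. Below is a minimal in-memory receiver that only records what the protocol proposes; it is a sketch meant to live inside package core (the protocol type is unexported), and a real node would relay these messages through its network layer instead.

    // recordingReceiver is an illustrative dkgReceiver that records proposals
    // and performs no networking or signing.
    type recordingReceiver struct {
        complaints []*types.DKGComplaint
        mpks       []*types.DKGMasterPublicKey
        prvShares  []*types.DKGPrivateShare
        antiNacks  []*types.DKGPrivateShare
        finals     []*types.DKGFinalize
    }

    func (r *recordingReceiver) ProposeDKGComplaint(c *types.DKGComplaint) {
        r.complaints = append(r.complaints, c)
    }
    func (r *recordingReceiver) ProposeDKGMasterPublicKey(mpk *types.DKGMasterPublicKey) {
        r.mpks = append(r.mpks, mpk)
    }
    func (r *recordingReceiver) ProposeDKGPrivateShare(prv *types.DKGPrivateShare) {
        r.prvShares = append(r.prvShares, prv)
    }
    func (r *recordingReceiver) ProposeDKGAntiNackComplaint(prv *types.DKGPrivateShare) {
        r.antiNacks = append(r.antiNacks, prv)
    }
    func (r *recordingReceiver) ProposeDKGFinalize(final *types.DKGFinalize) {
        r.finals = append(r.finals, final)
    }

With such a receiver a round can be driven locally: recv := &recordingReceiver{}; d := newDKGProtocol(myID, recv, round, threshold), then the recorded master public keys of all participants are fed to d.processMasterPublicKeys and the resulting private shares to d.processPrivateShare.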
+ sig, _ := ss.privateKey.Sign(hash) + return dkg.PartialSignature(sig) +} + +// NewDKGGroupPublicKey creats a DKGGroupPublicKey instance. +func NewDKGGroupPublicKey( + round uint64, + mpks []*types.DKGMasterPublicKey, complaints []*types.DKGComplaint, + threshold int) ( + *DKGGroupPublicKey, error) { + + if len(mpks) < threshold { + return nil, ErrInvalidThreshold + } + + // Calculate qualify members. + disqualifyIDs := map[types.NodeID]struct{}{} + complaintsByID := map[types.NodeID]int{} + for _, complaint := range complaints { + if complaint.IsNack() { + complaintsByID[complaint.PrivateShare.ProposerID]++ + } else { + disqualifyIDs[complaint.PrivateShare.ProposerID] = struct{}{} + } + } + for nID, num := range complaintsByID { + if num > threshold { + disqualifyIDs[nID] = struct{}{} + } + } + qualifyIDs := make(dkg.IDs, 0, len(mpks)-len(disqualifyIDs)) + qualifyNodeIDs := make(map[types.NodeID]struct{}) + mpkMap := make(map[dkg.ID]*types.DKGMasterPublicKey, cap(qualifyIDs)) + idMap := make(map[types.NodeID]dkg.ID) + for _, mpk := range mpks { + if _, exist := disqualifyIDs[mpk.ProposerID]; exist { + continue + } + mpkMap[mpk.DKGID] = mpk + idMap[mpk.ProposerID] = mpk.DKGID + qualifyIDs = append(qualifyIDs, mpk.DKGID) + qualifyNodeIDs[mpk.ProposerID] = struct{}{} + } + // Recover qualify members' public key. + pubKeys := make(map[types.NodeID]*dkg.PublicKey, len(qualifyIDs)) + for _, recvID := range qualifyIDs { + pubShares := dkg.NewEmptyPublicKeyShares() + for _, id := range qualifyIDs { + pubShare, err := mpkMap[id].PublicKeyShares.Share(recvID) + if err != nil { + return nil, err + } + if err := pubShares.AddShare(id, pubShare); err != nil { + return nil, err + } + } + pubKey, err := pubShares.RecoverPublicKey(qualifyIDs) + if err != nil { + return nil, err + } + pubKeys[mpkMap[recvID].ProposerID] = pubKey + } + // Recover Group Public Key. + pubShares := make([]*dkg.PublicKeyShares, 0, len(qualifyIDs)) + for _, id := range qualifyIDs { + pubShares = append(pubShares, &mpkMap[id].PublicKeyShares) + } + groupPK := dkg.RecoverGroupPublicKey(pubShares) + return &DKGGroupPublicKey{ + round: round, + qualifyIDs: qualifyIDs, + qualifyNodeIDs: qualifyNodeIDs, + idMap: idMap, + publicKeys: pubKeys, + threshold: threshold, + groupPublicKey: groupPK, + }, nil +} + +// VerifySignature verifies if the signature is correct. +func (gpk *DKGGroupPublicKey) VerifySignature( + hash common.Hash, sig crypto.Signature) bool { + return gpk.groupPublicKey.VerifySignature(hash, sig) +} + +// NewTSigVerifierCache creats a DKGGroupPublicKey instance. +func NewTSigVerifierCache(gov Governance, cacheSize int) *TSigVerifierCache { + return &TSigVerifierCache{ + gov: gov, + verifier: make(map[uint64]TSigVerifier), + cacheSize: cacheSize, + } +} + +// UpdateAndGet calls Update and then Get. +func (tc *TSigVerifierCache) UpdateAndGet(round uint64) ( + TSigVerifier, bool, error) { + ok, err := tc.Update(round) + if err != nil { + return nil, false, err + } + if !ok { + return nil, false, nil + } + v, ok := tc.Get(round) + return v, ok, nil +} + +// Update the cache and returns if success. 
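For consumers, verifying a threshold signature reduces to fetching a TSigVerifier for the round. A hedged usage sketch from outside package core; gov stands for any Governance implementation, and hash and sig are supplied by the caller, with imports of this core package, the consensus common package and its crypto package assumed.

    // verifyTSig checks an aggregated threshold signature for a round using a
    // long-lived cache. It returns (false, nil) when DKG for that round is
    // not final yet.
    func verifyTSig(cache *core.TSigVerifierCache, round uint64,
        hash common.Hash, sig crypto.Signature) (bool, error) {
        v, ok, err := cache.UpdateAndGet(round)
        if err != nil || !ok {
            return false, err
        }
        return v.VerifySignature(hash, sig), nil
    }

The cache itself is created once, e.g. cache := core.NewTSigVerifierCache(gov, 10), which keeps verifiers for at most ten rounds; queries for rounds that were already evicted fail with ErrRoundAlreadyPurged.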
+func (tc *TSigVerifierCache) Update(round uint64) (bool, error) { + tc.lock.Lock() + defer tc.lock.Unlock() + if round < tc.minRound { + return false, ErrRoundAlreadyPurged + } + if _, exist := tc.verifier[round]; exist { + return true, nil + } + if !tc.gov.IsDKGFinal(round) { + return false, nil + } + gpk, err := NewDKGGroupPublicKey(round, + tc.gov.DKGMasterPublicKeys(round), + tc.gov.DKGComplaints(round), + int(tc.gov.Configuration(round).DKGSetSize/3)+1) + if err != nil { + return false, err + } + if len(tc.verifier) == 0 { + tc.minRound = round + } + tc.verifier[round] = gpk + if len(tc.verifier) > tc.cacheSize { + delete(tc.verifier, tc.minRound) + } + for { + if _, exist := tc.verifier[tc.minRound]; !exist { + tc.minRound++ + } else { + break + } + } + return true, nil +} + +// Get the TSigVerifier of round and returns if it exists. +func (tc *TSigVerifierCache) Get(round uint64) (TSigVerifier, bool) { + tc.lock.RLock() + defer tc.lock.RUnlock() + verifier, exist := tc.verifier[round] + return verifier, exist +} + +func newTSigProtocol( + gpk *DKGGroupPublicKey, + hash common.Hash) *tsigProtocol { + return &tsigProtocol{ + groupPublicKey: gpk, + hash: hash, + sigs: make(map[dkg.ID]dkg.PartialSignature, gpk.threshold+1), + } +} + +func (tsig *tsigProtocol) sanityCheck(psig *types.DKGPartialSignature) error { + _, exist := tsig.groupPublicKey.publicKeys[psig.ProposerID] + if !exist { + return ErrNotQualifyDKGParticipant + } + ok, err := verifyDKGPartialSignatureSignature(psig) + if err != nil { + return err + } + if !ok { + return ErrIncorrectPartialSignatureSignature + } + if psig.Hash != tsig.hash { + return ErrMismatchPartialSignatureHash + } + return nil +} + +func (tsig *tsigProtocol) processPartialSignature( + psig *types.DKGPartialSignature) error { + if psig.Round != tsig.groupPublicKey.round { + return nil + } + id, exist := tsig.groupPublicKey.idMap[psig.ProposerID] + if !exist { + return ErrNotQualifyDKGParticipant + } + if err := tsig.sanityCheck(psig); err != nil { + return err + } + pubKey := tsig.groupPublicKey.publicKeys[psig.ProposerID] + if !pubKey.VerifySignature( + tsig.hash, crypto.Signature(psig.PartialSignature)) { + return ErrIncorrectPartialSignature + } + tsig.sigs[id] = psig.PartialSignature + return nil +} + +func (tsig *tsigProtocol) signature() (crypto.Signature, error) { + if len(tsig.sigs) < tsig.groupPublicKey.threshold { + return crypto.Signature{}, ErrNotEnoughtPartialSignatures + } + ids := make(dkg.IDs, 0, len(tsig.sigs)) + psigs := make([]dkg.PartialSignature, 0, len(tsig.sigs)) + for id, psig := range tsig.sigs { + ids = append(ids, id) + psigs = append(psigs, psig) + } + return dkg.RecoverSignature(psigs, ids) +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/interfaces.go new file mode 100644 index 000000000..7b985cf7a --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/interfaces.go @@ -0,0 +1,138 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
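Stepping back to the tsigProtocol shown just above: partial signatures are accumulated per hash until the group threshold is met, at which point signature() recovers the full threshold signature. A sketch of that aggregation loop inside package core; psigCh is an assumed source of DKGPartialSignature messages and is not part of this file.

    // aggregateTSig collects partial signatures for one hash until the group
    // threshold is reached, then returns the recovered signature.
    func aggregateTSig(gpk *DKGGroupPublicKey, hash common.Hash,
        psigCh <-chan *types.DKGPartialSignature) (crypto.Signature, error) {
        tsig := newTSigProtocol(gpk, hash)
        for psig := range psigCh {
            if err := tsig.processPartialSignature(psig); err != nil {
                // Unqualified proposer, mismatched hash or bad signature; skip.
                continue
            }
            sig, err := tsig.signature()
            if err == ErrNotEnoughtPartialSignatures {
                continue // still below the threshold; keep collecting
            }
            return sig, err
        }
        // Channel closed before the threshold was reached.
        return crypto.Signature{}, ErrNotEnoughtPartialSignatures
    }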
+// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Application describes the application interface that interacts with DEXON +// consensus core. +type Application interface { + // PreparePayload is called when consensus core is preparing a block. + PreparePayload(position types.Position) ([]byte, error) + + // PrepareWitness will return the witness data no lower than consensusHeight. + PrepareWitness(consensusHeight uint64) (types.Witness, error) + + // VerifyBlock verifies if the block is valid. + VerifyBlock(block *types.Block) bool + + // BlockConfirmed is called when a block is confirmed and added to lattice. + BlockConfirmed(block types.Block) + + // BlockDelivered is called when a block is add to the compaction chain. + BlockDelivered(blockHash common.Hash, result types.FinalizationResult) +} + +// Debug describes the application interface that requires +// more detailed consensus execution. +type Debug interface { + // StronglyAcked is called when a block is strongly acked. + StronglyAcked(blockHash common.Hash) + + // TotalOrderingDelivered is called when the total ordering algorithm deliver + // a set of block. + TotalOrderingDelivered(common.Hashes, uint32) +} + +// Network describs the network interface that interacts with DEXON consensus +// core. +type Network interface { + // BroadcastVote broadcasts vote to all nodes in DEXON network. + BroadcastVote(vote *types.Vote) + + // BroadcastBlock broadcasts block to all nodes in DEXON network. + BroadcastBlock(block *types.Block) + + // BroadcastAgreementResult broadcasts agreement result to DKG set. + BroadcastAgreementResult(randRequest *types.AgreementResult) + + // BroadcastRandomnessResult broadcasts rand request to Notary set. + BroadcastRandomnessResult(randResult *types.BlockRandomnessResult) + + // SendDKGPrivateShare sends PrivateShare to a DKG participant. + SendDKGPrivateShare(pub crypto.PublicKey, prvShare *types.DKGPrivateShare) + + // BroadcastDKGPrivateShare broadcasts PrivateShare to all DKG participants. + BroadcastDKGPrivateShare(prvShare *types.DKGPrivateShare) + + // BroadcastDKGPartialSignature broadcasts partialSignature to all + // DKG participants. + BroadcastDKGPartialSignature(psig *types.DKGPartialSignature) + + // ReceiveChan returns a channel to receive messages from DEXON network. + ReceiveChan() <-chan interface{} +} + +// Governance interface specifies interface to control the governance contract. +// Note that there are a lot more methods in the governance contract, that this +// interface only define those that are required to run the consensus algorithm. +type Governance interface { + // Configuration returns the configuration at a given round. + // Return the genesis configuration if round == 0. + Configuration(round uint64) *types.Config + + // CRS returns the CRS for a given round. + // Return the genesis CRS if round == 0. 
+ CRS(round uint64) common.Hash + + // Propose a CRS of round. + ProposeCRS(round uint64, signedCRS []byte) + + // NodeSet returns the node set at a given round. + // Return the genesis node set if round == 0. + NodeSet(round uint64) []crypto.PublicKey + + // NotifyRoundHeight notifies governance contract to generate configuration + // for that round with the block on that consensus height. + NotifyRoundHeight(targetRound, consensusHeight uint64) + + //// DKG-related methods. + + // AddDKGComplaint adds a DKGComplaint. + AddDKGComplaint(round uint64, complaint *types.DKGComplaint) + + // DKGComplaints gets all the DKGComplaints of round. + DKGComplaints(round uint64) []*types.DKGComplaint + + // AddDKGMasterPublicKey adds a DKGMasterPublicKey. + AddDKGMasterPublicKey(round uint64, masterPublicKey *types.DKGMasterPublicKey) + + // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round. + DKGMasterPublicKeys(round uint64) []*types.DKGMasterPublicKey + + // AddDKGFinalize adds a DKG finalize message. + AddDKGFinalize(round uint64, final *types.DKGFinalize) + + // IsDKGFinal checks if DKG is final. + IsDKGFinal(round uint64) bool +} + +// Ticker define the capability to tick by interval. +type Ticker interface { + // Tick would return a channel, which would be triggered until next tick. + Tick() <-chan time.Time + + // Stop the ticker. + Stop() +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice-data.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice-data.go new file mode 100644 index 000000000..31604a6d7 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice-data.go @@ -0,0 +1,589 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "errors" + "fmt" + "sort" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/blockdb" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Errors for sanity check error. 
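The Ticker interface above is the only timing dependency the core declares, so it can be satisfied by a thin adapter over the standard library. A sketch only, assuming plain wall-clock ticks are wanted; the consensus core may well ship its own implementation elsewhere.

    // wallClockTicker adapts time.Ticker to the Ticker interface.
    type wallClockTicker struct {
        t *time.Ticker
    }

    func newWallClockTicker(interval time.Duration) *wallClockTicker {
        return &wallClockTicker{t: time.NewTicker(interval)}
    }

    // Tick returns the channel that fires once per interval.
    func (w *wallClockTicker) Tick() <-chan time.Time { return w.t.C }

    // Stop releases the underlying ticker resources.
    func (w *wallClockTicker) Stop() { w.t.Stop() }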
+var ( + ErrAckingBlockNotExists = fmt.Errorf("acking block not exists") + ErrDuplicatedAckOnOneChain = fmt.Errorf("duplicated ack on one chain") + ErrInvalidChainID = fmt.Errorf("invalid chain id") + ErrInvalidProposerID = fmt.Errorf("invalid proposer id") + ErrInvalidWitness = fmt.Errorf("invalid witness data") + ErrInvalidBlock = fmt.Errorf("invalid block") + ErrNotAckParent = fmt.Errorf("not ack parent") + ErrDoubleAck = fmt.Errorf("double ack") + ErrAcksNotSorted = fmt.Errorf("acks not sorted") + ErrInvalidBlockHeight = fmt.Errorf("invalid block height") + ErrAlreadyInLattice = fmt.Errorf("block already in lattice") + ErrIncorrectBlockTime = fmt.Errorf("block timestamp is incorrect") + ErrInvalidRoundID = fmt.Errorf("invalid round id") + ErrUnknownRoundID = fmt.Errorf("unknown round id") + ErrRoundOutOfRange = fmt.Errorf("round out of range") + ErrRoundNotSwitch = fmt.Errorf("round not switch") + ErrNotGenesisBlock = fmt.Errorf("not a genesis block") + ErrUnexpectedGenesisBlock = fmt.Errorf("unexpected genesis block") +) + +// Errors for method usage +var ( + ErrRoundNotIncreasing = errors.New("round not increasing") + ErrPurgedBlockNotFound = errors.New("purged block not found") + ErrPurgeNotDeliveredBlock = errors.New("not purge from head") +) + +// latticeDataConfig is the configuration for latticeData for each round. +type latticeDataConfig struct { + roundBasedConfig + // Number of chains between runs + numChains uint32 + // Block interval specifies reasonable time difference between + // parent/child blocks. + minBlockTimeInterval time.Duration + maxBlockTimeInterval time.Duration +} + +// Initiate latticeDataConfig from types.Config. +func (config *latticeDataConfig) fromConfig(roundID uint64, cfg *types.Config) { + config.numChains = cfg.NumChains + config.minBlockTimeInterval = cfg.MinBlockInterval + config.maxBlockTimeInterval = cfg.MaxBlockInterval + config.setupRoundBasedFields(roundID, cfg) +} + +// Check if timestamp of a block is valid according to a reference time. +func (config *latticeDataConfig) isValidBlockTime( + b *types.Block, ref time.Time) bool { + return !(b.Timestamp.Before(ref.Add(config.minBlockTimeInterval)) || + b.Timestamp.After(ref.Add(config.maxBlockTimeInterval))) +} + +// isValidGenesisBlockTime check if a timestamp is valid for a genesis block. +func (config *latticeDataConfig) isValidGenesisBlockTime(b *types.Block) bool { + return !(b.Timestamp.Before(config.roundBeginTime) || b.Timestamp.After( + config.roundBeginTime.Add(config.maxBlockTimeInterval))) +} + +// newGenesisLatticeDataConfig constructs a latticeDataConfig instance. +func newGenesisLatticeDataConfig( + dMoment time.Time, config *types.Config) *latticeDataConfig { + c := &latticeDataConfig{} + c.fromConfig(0, config) + c.setRoundBeginTime(dMoment) + return c +} + +// newLatticeDataConfig constructs a latticeDataConfig instance. +func newLatticeDataConfig( + prev *latticeDataConfig, cur *types.Config) *latticeDataConfig { + c := &latticeDataConfig{} + c.fromConfig(prev.roundID+1, cur) + c.setRoundBeginTime(prev.roundEndTime) + return c +} + +// latticeData is a module for storing lattice. +type latticeData struct { + // we need blockdb to read blocks purged from cache in memory. + db blockdb.Reader + // chains stores chains' blocks and other info. + chains []*chainStatus + // blockByHash stores blocks, indexed by block hash. + blockByHash map[common.Hash]*types.Block + // This stores configuration for each round. 
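isValidBlockTime above accepts a block exactly when its timestamp lies in the closed window [ref+minBlockTimeInterval, ref+maxBlockTimeInterval], where ref is the chain tip's timestamp; genesis blocks use [roundBeginTime, roundBeginTime+maxBlockTimeInterval] instead. Restated as a standalone helper for clarity; the helper itself is illustrative and not part of the package.

    // inWindow reports whether ts falls within [ref+min, ref+max], inclusive,
    // which is equivalent to the negated Before/After test used above.
    func inWindow(ts, ref time.Time, min, max time.Duration) bool {
        return !ts.Before(ref.Add(min)) && !ts.After(ref.Add(max))
    }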
+ configs []*latticeDataConfig +} + +// newLatticeData creates a new latticeData struct. +func newLatticeData( + db blockdb.Reader, genesisConfig *latticeDataConfig) (data *latticeData) { + data = &latticeData{ + db: db, + chains: make([]*chainStatus, genesisConfig.numChains), + blockByHash: make(map[common.Hash]*types.Block), + configs: []*latticeDataConfig{genesisConfig}, + } + for i := range data.chains { + data.chains[i] = &chainStatus{ + ID: uint32(i), + blocks: []*types.Block{}, + lastAckPos: make([]*types.Position, genesisConfig.numChains), + } + } + return +} + +func (data *latticeData) checkAckingRelations(b *types.Block) error { + acksByChainID := make(map[uint32]struct{}, len(data.chains)) + for _, hash := range b.Acks { + bAck, err := data.findBlock(hash) + if err != nil { + if err == blockdb.ErrBlockDoesNotExist { + return ErrAckingBlockNotExists + } + return err + } + // Check if it acks blocks from old rounds, the allowed round difference + // is 1. + if DiffUint64(bAck.Position.Round, b.Position.Round) > 1 { + return ErrRoundOutOfRange + } + // Check if it acks older blocks than blocks on the same chain. + lastAckPos := + data.chains[bAck.Position.ChainID].lastAckPos[b.Position.ChainID] + if lastAckPos != nil && !bAck.Position.Newer(lastAckPos) { + return ErrDoubleAck + } + // Check if ack two blocks on the same chain. This would need + // to check after we replace map with slice for acks. + if _, acked := acksByChainID[bAck.Position.ChainID]; acked { + return ErrDuplicatedAckOnOneChain + } + acksByChainID[bAck.Position.ChainID] = struct{}{} + } + return nil +} + +func (data *latticeData) sanityCheck(b *types.Block) error { + // TODO(mission): Check if its proposer is in validator set somewhere, + // lattice doesn't have to know about node set. + config := data.getConfig(b.Position.Round) + if config == nil { + return ErrInvalidRoundID + } + // Check if the chain id is valid. + if b.Position.ChainID >= config.numChains { + return ErrInvalidChainID + } + // Make sure parent block is arrived. + chain := data.chains[b.Position.ChainID] + chainTip := chain.tip + if chainTip == nil { + if !b.ParentHash.Equal(common.Hash{}) { + return ErrAckingBlockNotExists + } + if !b.IsGenesis() { + return ErrNotGenesisBlock + } + if !config.isValidGenesisBlockTime(b) { + return ErrIncorrectBlockTime + } + return data.checkAckingRelations(b) + } + // Check parent block if parent hash is specified. + if !b.ParentHash.Equal(common.Hash{}) { + if !b.ParentHash.Equal(chainTip.Hash) { + return ErrAckingBlockNotExists + } + if !b.IsAcking(b.ParentHash) { + return ErrNotAckParent + } + } + chainTipConfig := data.getConfig(chainTip.Position.Round) + // Round can't be rewinded. + if chainTip.Position.Round > b.Position.Round { + return ErrInvalidRoundID + } + checkTip := false + if chainTip.Timestamp.After(chainTipConfig.roundEndTime) { + // Round switching should happen when chainTip already pass + // round end time of its round. + if chainTip.Position.Round == b.Position.Round { + return ErrRoundNotSwitch + } + // The round ID is continuous. + if b.Position.Round-chainTip.Position.Round == 1 { + checkTip = true + } else { + // This block should be genesis block of new round because round + // ID is not continuous. + if !b.IsGenesis() { + return ErrNotGenesisBlock + } + if !config.isValidGenesisBlockTime(b) { + return ErrIncorrectBlockTime + } + // TODO(mission): make sure rounds between chainTip and current block + // don't expect blocks from this chain. 
+ } + } else { + if chainTip.Position.Round != b.Position.Round { + // Round should not switch. + return ErrInvalidRoundID + } + checkTip = true + } + // Validate the relation between chain tip when needed. + if checkTip { + if b.Position.Height != chainTip.Position.Height+1 { + return ErrInvalidBlockHeight + } + if b.Witness.Height < chainTip.Witness.Height { + return ErrInvalidWitness + } + if !config.isValidBlockTime(b, chainTip.Timestamp) { + return ErrIncorrectBlockTime + } + // Chain tip should be acked. + if !b.IsAcking(chainTip.Hash) { + return ErrNotAckParent + } + } + if err := data.checkAckingRelations(b); err != nil { + return err + } + return nil +} + +// addBlock processes block, it does sanity check, inserts block into +// lattice and deletes blocks which will not be used. +func (data *latticeData) addBlock( + block *types.Block) (deliverable []*types.Block, err error) { + var ( + bAck *types.Block + updated bool + ) + // TODO(mission): sanity check twice, might hurt performance. + // If a block does not pass sanity check, report error. + if err = data.sanityCheck(block); err != nil { + return + } + if err = data.chains[block.Position.ChainID].addBlock(block); err != nil { + return + } + data.blockByHash[block.Hash] = block + // Update lastAckPos. + for _, ack := range block.Acks { + if bAck, err = data.findBlock(ack); err != nil { + return + } + data.chains[bAck.Position.ChainID].lastAckPos[block.Position.ChainID] = + bAck.Position.Clone() + } + // Extract blocks that deliverable to total ordering. + // A block is deliverable to total ordering iff: + // - All its acking blocks are delivered to total ordering. + for { + updated = false + for _, status := range data.chains { + if status.nextOutputIndex >= len(status.blocks) { + continue + } + tip := status.blocks[status.nextOutputIndex] + allAckingBlockDelivered := true + for _, ack := range tip.Acks { + if bAck, err = data.findBlock(ack); err != nil { + return + } + // Check if this block is outputed or not. + idx := data.chains[bAck.Position.ChainID].findBlock( + &bAck.Position) + if idx == -1 || + idx < data.chains[bAck.Position.ChainID].nextOutputIndex { + continue + } + // This acked block exists and not delivered yet. + allAckingBlockDelivered = false + } + if allAckingBlockDelivered { + status.nextOutputIndex++ + deliverable = append(deliverable, tip) + updated = true + } + } + if !updated { + break + } + } + return +} + +// prepareBlock helps to setup fields of block based on its ChainID and Round, +// including: +// - Acks +// - Timestamp +// - ParentHash and Height from parent block. If there is no valid parent block +// (ex. Newly added chain or bootstrap ), these fields would be setup as +// genesis block. +func (data *latticeData) prepareBlock(b *types.Block) error { + var ( + minTimestamp, maxTimestamp time.Time + config *latticeDataConfig + acks common.Hashes + bindTip bool + chainTip *types.Block + ) + if config = data.getConfig(b.Position.Round); config == nil { + return ErrUnknownRoundID + } + // Reset fields to make sure we got these information from parent block. + b.Position.Height = 0 + b.ParentHash = common.Hash{} + // Decide valid timestamp range. 
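The loop at the end of addBlock repeatedly scans every chain and releases a block to total ordering only once everything it acks has itself been released, iterating until no chain makes progress. A toy, self-contained restatement of that rule with string hashes and none of the chain bookkeeping, purely for illustration; acks are assumed to reference blocks inside the same pending set.

    type toyBlock struct {
        hash string
        acks []string
    }

    // deliverOrder releases blocks whose acked blocks have all been released,
    // repeating until a fixed point is reached; it mirrors the rule used in
    // addBlock above.
    func deliverOrder(pending []toyBlock) []string {
        released := map[string]bool{}
        var order []string
        for changed := true; changed; {
            changed = false
            for _, b := range pending {
                if released[b.hash] {
                    continue
                }
                ready := true
                for _, a := range b.acks {
                    if !released[a] {
                        ready = false
                        break
                    }
                }
                if ready {
                    released[b.hash] = true
                    order = append(order, b.hash)
                    changed = true
                }
            }
        }
        return order
    }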
+ homeChain := data.chains[b.Position.ChainID] + if homeChain.tip != nil { + chainTip = homeChain.tip + if b.Position.Round < chainTip.Position.Round { + return ErrInvalidRoundID + } + chainTipConfig := data.getConfig(chainTip.Position.Round) + if chainTip.Timestamp.After(chainTipConfig.roundEndTime) { + if b.Position.Round == chainTip.Position.Round { + return ErrRoundNotSwitch + } + if b.Position.Round == chainTip.Position.Round+1 { + bindTip = true + } + } else { + if b.Position.Round != chainTip.Position.Round { + return ErrInvalidRoundID + } + bindTip = true + } + // TODO(mission): find a way to prevent us to assign a witness height + // from Jurassic period. + b.Witness.Height = chainTip.Witness.Height + } + // For blocks with continuous round ID, assign timestamp range based on + // parent block and bound config. + if bindTip { + minTimestamp = chainTip.Timestamp.Add(config.minBlockTimeInterval) + maxTimestamp = chainTip.Timestamp.Add(config.maxBlockTimeInterval) + // When a chain is removed and added back, the reference block + // of previous round can't be used as parent block. + b.ParentHash = chainTip.Hash + b.Position.Height = chainTip.Position.Height + 1 + } else { + // Discontinuous round ID detected, another fresh start of + // new round. + minTimestamp = config.roundBeginTime + maxTimestamp = config.roundBeginTime.Add(config.maxBlockTimeInterval) + } + // Fix timestamp if the given one is invalid. + if b.Timestamp.Before(minTimestamp) { + b.Timestamp = minTimestamp + } else if b.Timestamp.After(maxTimestamp) { + b.Timestamp = maxTimestamp + } + // Setup acks fields. + for _, status := range data.chains { + // Check if we can ack latest block on that chain. + if status.tip == nil { + continue + } + lastAckPos := status.lastAckPos[b.Position.ChainID] + if lastAckPos != nil && !status.tip.Position.Newer(lastAckPos) { + // The reference block is already acked. + continue + } + // Can't ack block too old or too new to us. + if DiffUint64( + status.tip.Position.Round, b.Position.Round) > 1 { + continue + } + acks = append(acks, status.tip.Hash) + } + b.Acks = common.NewSortedHashes(acks) + return nil +} + +// prepareEmptyBlock helps to setup fields of block based on its ChainID. +// including: +// - Acks only acking its parent +// - Timestamp with parent.Timestamp + minBlockProposeInterval +// - ParentHash and Height from parent block. If there is no valid parent block +// (ex. Newly added chain or bootstrap ), these fields would be setup as +// genesis block. +func (data *latticeData) prepareEmptyBlock(b *types.Block) { + // emptyBlock has no proposer. + b.ProposerID = types.NodeID{} + var acks common.Hashes + // Reset fields to make sure we got these information from parent block. + b.Position.Height = 0 + b.Position.Round = 0 + b.ParentHash = common.Hash{} + b.Timestamp = time.Time{} + // Decide valid timestamp range. + homeChain := data.chains[b.Position.ChainID] + if homeChain.tip != nil { + chainTip := homeChain.tip + b.ParentHash = chainTip.Hash + chainTipConfig := data.getConfig(chainTip.Position.Round) + if chainTip.Timestamp.After(chainTipConfig.roundEndTime) { + b.Position.Round = chainTip.Position.Round + 1 + } else { + b.Position.Round = chainTip.Position.Round + } + b.Position.Height = chainTip.Position.Height + 1 + b.Timestamp = chainTip.Timestamp.Add(chainTipConfig.minBlockTimeInterval) + acks = append(acks, chainTip.Hash) + } + b.Acks = common.NewSortedHashes(acks) +} + +// TODO(mission): make more abstraction for this method. 
+// nextHeight returns the next height for the chain. +func (data *latticeData) nextPosition(chainID uint32) types.Position { + return data.chains[chainID].nextPosition() +} + +// findBlock seeks blocks in memory or db. +func (data *latticeData) findBlock(h common.Hash) (b *types.Block, err error) { + if b = data.blockByHash[h]; b != nil { + return + } + var tmpB types.Block + if tmpB, err = data.db.Get(h); err != nil { + return + } + b = &tmpB + return +} + +// purgeBlocks purges blocks from cache. +func (data *latticeData) purgeBlocks(blocks []*types.Block) error { + for _, b := range blocks { + if _, exists := data.blockByHash[b.Hash]; !exists { + return ErrPurgedBlockNotFound + } + delete(data.blockByHash, b.Hash) + // blocks would be purged in ascending order in position. + if err := data.chains[b.Position.ChainID].purgeBlock(b); err != nil { + return err + } + } + return nil +} + +// getConfig get configuration for lattice-data by round ID. +func (data *latticeData) getConfig(round uint64) (config *latticeDataConfig) { + if round >= uint64(len(data.configs)) { + return + } + return data.configs[round] +} + +// appendConfig appends a configuration for upcoming round. When you append +// a config for round R, next time you can only append the config for round R+1. +func (data *latticeData) appendConfig( + round uint64, config *types.Config) (err error) { + // Make sure caller knows which round this config belongs to. + if round != uint64(len(data.configs)) { + return ErrRoundNotIncreasing + } + // Set round beginning time. + newConfig := newLatticeDataConfig(data.configs[len(data.configs)-1], config) + data.configs = append(data.configs, newConfig) + // Resize each slice if incoming config contains larger number of chains. + if uint32(len(data.chains)) < newConfig.numChains { + count := newConfig.numChains - uint32(len(data.chains)) + for _, status := range data.chains { + status.lastAckPos = append( + status.lastAckPos, make([]*types.Position, count)...) + } + for i := uint32(len(data.chains)); i < newConfig.numChains; i++ { + data.chains = append(data.chains, &chainStatus{ + ID: i, + blocks: []*types.Block{}, + lastAckPos: make([]*types.Position, newConfig.numChains), + }) + } + } + return nil +} + +type chainStatus struct { + // ID keeps the chainID of this chain status. + ID uint32 + // blocks stores blocks proposed for this chain, sorted by height. + blocks []*types.Block + // tip is the last block on this chain. + tip *types.Block + // lastAckPos caches last acking position from other chains. Nil means + // not acked yet. + lastAckPos []*types.Position + // the index to be output next time. + nextOutputIndex int +} + +// findBlock finds index of block in current pending blocks on this chain. +// -1 means not found. +func (s *chainStatus) findBlock(pos *types.Position) (idx int) { + idx = sort.Search(len(s.blocks), func(i int) bool { + return s.blocks[i].Position.Newer(pos) || + s.blocks[i].Position.Equal(pos) + }) + if idx == len(s.blocks) { + idx = -1 + } else if !s.blocks[idx].Position.Equal(pos) { + idx = -1 + } + return idx +} + +// getBlock returns a pending block by giving its index from findBlock method. +func (s *chainStatus) getBlock(idx int) (b *types.Block) { + if idx < 0 || idx >= len(s.blocks) { + return + } + b = s.blocks[idx] + return +} + +// addBlock adds a block to pending blocks on this chain. +func (s *chainStatus) addBlock(b *types.Block) error { + s.blocks = append(s.blocks, b) + s.tip = b + return nil +} + +// TODO(mission): change back to nextHeight. 
+// nextPosition returns a valid position for new block in this chain. +func (s *chainStatus) nextPosition() types.Position { + if s.tip == nil { + return types.Position{ + ChainID: s.ID, + Height: 0, + } + } + return types.Position{ + ChainID: s.ID, + Height: s.tip.Position.Height + 1, + } +} + +// purgeBlock purge a block from cache, make sure this block already +// persists to blockdb. +func (s *chainStatus) purgeBlock(b *types.Block) error { + if b.Hash != s.blocks[0].Hash || s.nextOutputIndex <= 0 { + return ErrPurgeNotDeliveredBlock + } + s.blocks = s.blocks[1:] + s.nextOutputIndex-- + return nil +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice.go new file mode 100644 index 000000000..984203d7d --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice.go @@ -0,0 +1,265 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "sync" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/blockdb" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Lattice represents a unit to produce a global ordering from multiple chains. +type Lattice struct { + lock sync.RWMutex + authModule *Authenticator + chainNum uint32 + app Application + debug Debug + pool blockPool + data *latticeData + toModule *totalOrdering + ctModule *consensusTimestamp + logger common.Logger +} + +// NewLattice constructs an Lattice instance. +func NewLattice( + dMoment time.Time, + cfg *types.Config, + authModule *Authenticator, + app Application, + debug Debug, + db blockdb.BlockDatabase, + logger common.Logger) (s *Lattice) { + // Create genesis latticeDataConfig. + dataConfig := newGenesisLatticeDataConfig(dMoment, cfg) + toConfig := newGenesisTotalOrderingConfig(dMoment, cfg) + s = &Lattice{ + authModule: authModule, + chainNum: cfg.NumChains, + app: app, + debug: debug, + pool: newBlockPool(cfg.NumChains), + data: newLatticeData(db, dataConfig), + toModule: newTotalOrdering(toConfig), + ctModule: newConsensusTimestamp(dMoment, 0, cfg.NumChains), + logger: logger, + } + return +} + +// PrepareBlock setup block's field based on current lattice status. 
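Everything the Lattice needs is injected through NewLattice above. A wiring sketch from the embedding node's point of view; auth, app, dbg, db and logger stand for concrete implementations supplied by the node (the Authenticator type is defined elsewhere in this package), dMoment is the agreed genesis moment, and the core, types, blockdb and common import paths are assumed.

    // buildLattice wires the injected dependencies together; every argument is
    // assumed to be provided by the embedding node.
    func buildLattice(dMoment time.Time, cfg *types.Config, auth *core.Authenticator,
        app core.Application, dbg core.Debug, db blockdb.BlockDatabase,
        logger common.Logger) *core.Lattice {
        return core.NewLattice(
            dMoment, // time the genesis round begins
            cfg,     // round-0 configuration from governance
            auth,    // signs proposed blocks, verifies received ones
            app,     // application callbacks
            dbg,     // optional debug hooks; ProcessBlock nil-checks it
            db,      // block database used to read purged blocks
            logger,
        )
    }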
+func (s *Lattice) PrepareBlock( + b *types.Block, proposeTime time.Time) (err error) { + + s.lock.RLock() + defer s.lock.RUnlock() + + b.Timestamp = proposeTime + if err = s.data.prepareBlock(b); err != nil { + return + } + s.logger.Debug("Calling Application.PreparePayload", "position", b.Position) + if b.Payload, err = s.app.PreparePayload(b.Position); err != nil { + return + } + s.logger.Debug("Calling Application.PrepareWitness", + "height", b.Witness.Height) + if b.Witness, err = s.app.PrepareWitness(b.Witness.Height); err != nil { + return + } + if err = s.authModule.SignBlock(b); err != nil { + return + } + return +} + +// PrepareEmptyBlock setup block's field based on current lattice status. +func (s *Lattice) PrepareEmptyBlock(b *types.Block) (err error) { + s.lock.RLock() + defer s.lock.RUnlock() + s.data.prepareEmptyBlock(b) + if b.Hash, err = hashBlock(b); err != nil { + return + } + return +} + +// SanityCheck check if a block is valid. +// If checkRelation is true, it also checks with current lattice status. +// +// If some acking blocks don't exists, Lattice would help to cache this block +// and retry when lattice updated in Lattice.ProcessBlock. +func (s *Lattice) SanityCheck(b *types.Block, checkRelation bool) (err error) { + if b.IsEmpty() { + // Only need to verify block's hash. + var hash common.Hash + if hash, err = hashBlock(b); err != nil { + return + } + if b.Hash != hash { + return ErrInvalidBlock + } + } else { + // Verify block's signature. + if err = s.authModule.VerifyBlock(b); err != nil { + return + } + } + // Make sure acks are sorted. + for i := range b.Acks { + if i == 0 { + continue + } + if !b.Acks[i-1].Less(b.Acks[i]) { + err = ErrAcksNotSorted + return + } + } + // Verify data in application layer. + s.logger.Debug("Calling Application.VerifyBlock", "block", b) + if !s.app.VerifyBlock(b) { + err = ErrInvalidBlock + return err + } + if !checkRelation { + return + } + s.lock.RLock() + defer s.lock.RUnlock() + if err = s.data.sanityCheck(b); err != nil { + // Add to block pool, once the lattice updated, + // would be checked again. + if err == ErrAckingBlockNotExists { + s.pool.addBlock(b) + } + s.logger.Error("Sanity Check failed", "error", err) + return + } + return +} + +// ProcessBlock adds a block into lattice, and deliver ordered blocks. +// If any block pass sanity check after this block add into lattice, they +// would be returned, too. +// +// NOTE: assume the block passed sanity check. +func (s *Lattice) ProcessBlock( + input *types.Block) (verified, delivered []*types.Block, err error) { + + var ( + tip, b *types.Block + toDelivered []*types.Block + inLattice []*types.Block + deliveredMode uint32 + ) + s.lock.Lock() + defer s.lock.Unlock() + if inLattice, err = s.data.addBlock(input); err != nil { + // TODO(mission): if sanity check failed with "acking block doesn't + // exists", we should keep it in a pool. + s.logger.Error("Sanity Check failed when adding blocks", "error", err) + return + } + // TODO(mission): remove this hack, BA related stuffs should not + // be done here. + if s.debug != nil { + s.debug.StronglyAcked(input.Hash) + } + s.logger.Debug("Calling Application.BlockConfirmed", "block", input) + s.app.BlockConfirmed(*input.Clone()) + // Purge blocks in pool with the same chainID and lower height. + s.pool.purgeBlocks(input.Position.ChainID, input.Position.Height) + // Replay tips in pool to check their validity. 
+ for i := uint32(0); i < s.chainNum; i++ { + if tip = s.pool.tip(i); tip == nil { + continue + } + err = s.data.sanityCheck(tip) + if err == nil { + verified = append(verified, tip) + } + if err == ErrAckingBlockNotExists { + continue + } + s.pool.removeTip(i) + } + // Perform total ordering for each block added to lattice. + for _, b = range inLattice { + toDelivered, deliveredMode, err = s.toModule.processBlock(b) + if err != nil { + // All errors from total ordering is serious, should panic. + panic(err) + } + if len(toDelivered) == 0 { + continue + } + hashes := make(common.Hashes, len(toDelivered)) + for idx := range toDelivered { + hashes[idx] = toDelivered[idx].Hash + } + if s.debug != nil { + s.debug.TotalOrderingDelivered(hashes, deliveredMode) + } + // Perform timestamp generation. + if err = s.ctModule.processBlocks(toDelivered); err != nil { + return + } + delivered = append(delivered, toDelivered...) + } + return +} + +// NextPosition returns expected position of incoming block for that chain. +func (s *Lattice) NextPosition(chainID uint32) types.Position { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.data.nextPosition(chainID) +} + +// PurgeBlocks from cache of blocks in memory, this is called when the caller +// make sure those blocks are saved to db. +func (s *Lattice) PurgeBlocks(blocks []*types.Block) error { + s.lock.Lock() + defer s.lock.Unlock() + + return s.data.purgeBlocks(blocks) +} + +// AppendConfig add new configs for upcoming rounds. If you add a config for +// round R, next time you can only add the config for round R+1. +func (s *Lattice) AppendConfig(round uint64, config *types.Config) (err error) { + s.lock.Lock() + defer s.lock.Unlock() + + s.pool.resize(config.NumChains) + if err = s.data.appendConfig(round, config); err != nil { + return + } + if err = s.toModule.appendConfig(round, config); err != nil { + return + } + if err = s.ctModule.appendConfig(round, config); err != nil { + return + } + return +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/leader-selector.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/leader-selector.go new file mode 100644 index 000000000..23b9bb12e --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/leader-selector.go @@ -0,0 +1,106 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "math/big" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Errors for leader module. 
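Putting the Lattice API together on the receiving side: a sanity check first (which also caches blocks whose acked blocks have not arrived), then ProcessBlock to obtain newly verified and delivered blocks. A sketch under the assumption that blocks arrive one at a time from the network; persistence and exactly which blocks are handed back to consensus are left to the caller.

    // onBlock is an assumed handler invoked for every block received from the
    // network.
    func onBlock(lattice *core.Lattice, b *types.Block) error {
        if err := lattice.SanityCheck(b, true); err != nil {
            if err == core.ErrAckingBlockNotExists {
                // The block is cached in the lattice pool and will be
                // re-checked once its acked blocks arrive.
                return nil
            }
            return err
        }
        verified, delivered, err := lattice.ProcessBlock(b)
        if err != nil {
            return err
        }
        _ = verified // pooled blocks that passed sanity check again
        // Once the delivered blocks are persisted to the block database, the
        // caller may release them from memory via lattice.PurgeBlocks.
        _ = delivered
        return nil
    }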
+var ( + ErrIncorrectCRSSignature = fmt.Errorf("incorrect CRS signature") +) + +// Some constant value. +var ( + maxHash *big.Int + one *big.Rat +) + +func init() { + hash := make([]byte, common.HashLength) + for i := range hash { + hash[i] = 0xff + } + maxHash = big.NewInt(0).SetBytes(hash) + one = big.NewRat(1, 1) +} + +type leaderSelector struct { + hashCRS common.Hash + numCRS *big.Int + minCRSBlock *big.Int + minBlockHash common.Hash +} + +func newLeaderSelector( + crs common.Hash) *leaderSelector { + numCRS := big.NewInt(0) + numCRS.SetBytes(crs[:]) + return &leaderSelector{ + numCRS: numCRS, + hashCRS: crs, + minCRSBlock: maxHash, + } +} + +func (l *leaderSelector) distance(sig crypto.Signature) *big.Int { + hash := crypto.Keccak256Hash(sig.Signature[:]) + num := big.NewInt(0) + num.SetBytes(hash[:]) + num.Abs(num.Sub(l.numCRS, num)) + return num +} + +func (l *leaderSelector) probability(sig crypto.Signature) float64 { + dis := l.distance(sig) + prob := big.NewRat(1, 1).SetFrac(dis, maxHash) + p, _ := prob.Sub(one, prob).Float64() + return p +} + +func (l *leaderSelector) restart() { + l.minCRSBlock = maxHash + l.minBlockHash = common.Hash{} +} + +func (l *leaderSelector) leaderBlockHash() common.Hash { + return l.minBlockHash +} + +func (l *leaderSelector) processBlock(block *types.Block) error { + ok, err := verifyCRSSignature(block, l.hashCRS) + if err != nil { + return err + } + if !ok { + return ErrIncorrectCRSSignature + } + dist := l.distance(block.CRSSignature) + cmp := l.minCRSBlock.Cmp(dist) + if cmp > 0 || (cmp == 0 && block.Hash.Less(l.minBlockHash)) { + l.minCRSBlock = dist + l.minBlockHash = block.Hash + } + return nil +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/negative-ack.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/negative-ack.go new file mode 100644 index 000000000..89571529c --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/negative-ack.go @@ -0,0 +1,211 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "time" + + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +type negativeAck struct { + // owner is the ID of proposer itself, this is used when deciding + // a node to be restricted or not. + owner types.NodeID + + numOfNodes int + + // timeDelay and timeExpire are for nack timeout. + timeDelay time.Duration + timeExpire time.Duration + + // restricteds stores nodes which has been restricted and the time it's + // restricted. + restricteds map[types.NodeID]time.Time + + // lastVotes and lockedVotes store the votes for nack. lastVotes[nid1][nid2] + // and lockedVotes[nid1][nid2] both mean that nid2 votes nid1. 
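The leader selector above ranks candidate blocks by how close the Keccak-256 hash of their CRS signature is to the round's CRS, breaking ties toward the smaller block hash. A standalone restatement of the distance metric, purely illustrative; crs and sigHash are raw byte slices supplied by the caller.

    // crsDistance mirrors leaderSelector.distance: the CRS and the hash of the
    // candidate's CRS signature are read as unsigned big integers, and the
    // absolute difference is the candidate's score (lower is better).
    func crsDistance(crs, sigHash []byte) *big.Int {
        c := new(big.Int).SetBytes(crs)
        h := new(big.Int).SetBytes(sigHash)
        return new(big.Int).Abs(new(big.Int).Sub(c, h))
    }

probability then maps this distance into [0, 1] as 1 - distance/maxHash, so a signature whose hash lands exactly on the CRS scores 1.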
The difference + // is lockedVotes works only when nid1 is restricted, so that the votes are + // needed to be locked. + lastVotes map[types.NodeID]map[types.NodeID]struct{} + lockedVotes map[types.NodeID]map[types.NodeID]struct{} + + // timeDiffs is the cache for last time stamps. timeDiffs[nid1][nid2] means + // the last updated timestamps nid1 sees nid2. + timeDiffs map[types.NodeID]map[types.NodeID]map[types.NodeID]time.Time +} + +// newNegativeAck creates a new negaticeAck instance. +func newNegativeAck(nid types.NodeID) *negativeAck { + n := &negativeAck{ + owner: nid, + numOfNodes: 0, + restricteds: make(map[types.NodeID]time.Time), + lastVotes: make(map[types.NodeID]map[types.NodeID]struct{}), + lockedVotes: make(map[types.NodeID]map[types.NodeID]struct{}), + timeDiffs: make(map[types.NodeID]map[types.NodeID]map[types.NodeID]time.Time), + } + n.addNode(nid) + return n +} + +// processNewVote is called when a new "vote" occurs, that is, a node +// sees that other 2f + 1 nodes think a node is slow. "nid" is the +// node which propesed the block which the timestamps votes and "h" is +// the node been voted to be nacked. +func (n *negativeAck) processNewVote( + nid types.NodeID, + h types.NodeID, +) []types.NodeID { + + nackeds := []types.NodeID{} + if _, exist := n.restricteds[h]; exist { + n.lockedVotes[h][nid] = struct{}{} + if len(n.lockedVotes[h]) > 2*(n.numOfNodes-1)/3 { + nackeds = append(nackeds, h) + delete(n.restricteds, h) + } + } else { + if n.owner == nid { + n.restrict(h) + } else { + n.lastVotes[h][nid] = struct{}{} + if len(n.lastVotes[h]) > (n.numOfNodes-1)/3 { + n.restrict(h) + } + } + } + return nackeds +} + +// processTimestamps process new timestamps of a block which is proposed by +// node nid, and returns the nodes being nacked. 
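processNewVote above applies two distinct thresholds: a node becomes restricted once strictly more than (n-1)/3 peers have voted it slow (or immediately when the owner itself is the voter), and a restricted node is finally nacked once strictly more than 2(n-1)/3 locked votes accumulate. For example, with n = 10 nodes that means 4 voters to restrict and 7 to nack; a tiny helper expressing the same arithmetic, added only for illustration:

    // nackThresholds returns the minimum numbers of distinct voters needed to
    // restrict a node and to finally nack it, for a cluster of n nodes.
    func nackThresholds(n int) (restrict, nack int) {
        return (n-1)/3 + 1, 2*(n-1)/3 + 1
    }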
+func (n *negativeAck) processTimestamps( + nid types.NodeID, + ts map[types.NodeID]time.Time, +) []types.NodeID { + + n.checkRestrictExpire() + + nackeds := []types.NodeID{} + for h := range n.timeDiffs { + if n.timeDiffs[nid][h][h].Equal(ts[h]) { + votes := 0 + for hh := range n.timeDiffs { + if ts[hh].Sub(n.timeDiffs[nid][h][hh]) >= n.timeDelay { + votes++ + } + } + if votes > 2*((n.numOfNodes-1)/3) { + n.lastVotes[h][nid] = struct{}{} + nack := n.processNewVote(nid, h) + for _, i := range nack { + nackeds = append(nackeds, i) + } + } else { + delete(n.lastVotes[h], nid) + } + } else { + for hh := range n.timeDiffs { + n.timeDiffs[nid][h][hh] = ts[hh] + } + delete(n.lastVotes[h], nid) + } + } + return nackeds +} + +func (n *negativeAck) checkRestrictExpire() { + expired := []types.NodeID{} + now := time.Now() + for h, t := range n.restricteds { + if now.Sub(t) >= n.timeExpire { + expired = append(expired, h) + } + } + for _, h := range expired { + delete(n.restricteds, h) + } +} + +func (n *negativeAck) restrict(nid types.NodeID) { + if _, exist := n.restricteds[nid]; !exist { + n.restricteds[nid] = time.Now().UTC() + n.lockedVotes[nid] = map[types.NodeID]struct{}{} + for h := range n.lastVotes[nid] { + n.lockedVotes[nid][h] = struct{}{} + } + } +} + +func (n *negativeAck) getRestrictedNodes() map[types.NodeID]struct{} { + n.checkRestrictExpire() + ret := map[types.NodeID]struct{}{} + for h := range n.restricteds { + ret[h] = struct{}{} + } + return ret +} + +func (n *negativeAck) setTimeDelay(t time.Duration) { + n.timeDelay = t +} + +func (n *negativeAck) setTimeExpire(t time.Duration) { + n.timeExpire = t +} + +func (n *negativeAck) addNode(nid types.NodeID) { + n.numOfNodes++ + n.lastVotes[nid] = make(map[types.NodeID]struct{}) + n.lockedVotes[nid] = make(map[types.NodeID]struct{}) + + newTimeDiff := make(map[types.NodeID]map[types.NodeID]time.Time) + for h := range n.timeDiffs { + newTimeDiff2 := make(map[types.NodeID]time.Time) + for hh := range n.timeDiffs { + newTimeDiff2[hh] = time.Time{} + } + newTimeDiff[h] = newTimeDiff2 + } + n.timeDiffs[nid] = newTimeDiff + for h := range n.timeDiffs { + n.timeDiffs[h][nid] = make(map[types.NodeID]time.Time) + } +} + +func (n *negativeAck) deleteNode(nid types.NodeID) { + n.numOfNodes-- + + delete(n.timeDiffs, nid) + + for h := range n.lastVotes { + delete(n.lastVotes[h], nid) + } + delete(n.lastVotes, nid) + delete(n.lockedVotes, nid) + + for h := range n.timeDiffs { + delete(n.timeDiffs[h], nid) + for hh := range n.timeDiffs[h] { + delete(n.timeDiffs[h][hh], nid) + } + } + + delete(n.restricteds, nid) +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nodeset-cache.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nodeset-cache.go new file mode 100644 index 000000000..5b9f25c14 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nodeset-cache.go @@ -0,0 +1,233 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "errors" + "sync" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +var ( + // ErrRoundNotReady means we got nil config. + ErrRoundNotReady = errors.New("round is not ready") +) + +type sets struct { + nodeSet *types.NodeSet + notarySet []map[types.NodeID]struct{} + dkgSet map[types.NodeID]struct{} +} + +// NodeSetCacheInterface interface specifies interface used by NodeSetCache. +type NodeSetCacheInterface interface { + // Configuration returns the configuration at a given round. + // Return the genesis configuration if round == 0. + Configuration(round uint64) *types.Config + + // CRS returns the CRS for a given round. + // Return the genesis CRS if round == 0. + CRS(round uint64) common.Hash + + // NodeSet returns the node set at a given round. + // Return the genesis node set if round == 0. + NodeSet(round uint64) []crypto.PublicKey +} + +// NodeSetCache caches node set information. +type NodeSetCache struct { + lock sync.RWMutex + nsIntf NodeSetCacheInterface + rounds map[uint64]*sets + keyPool map[types.NodeID]*struct { + pubKey crypto.PublicKey + refCnt int + } +} + +// NewNodeSetCache constructs an NodeSetCache instance. +func NewNodeSetCache(nsIntf NodeSetCacheInterface) *NodeSetCache { + return &NodeSetCache{ + nsIntf: nsIntf, + rounds: make(map[uint64]*sets), + keyPool: make(map[types.NodeID]*struct { + pubKey crypto.PublicKey + refCnt int + }), + } +} + +// Exists checks if a node is in node set of that round. +func (cache *NodeSetCache) Exists( + round uint64, nodeID types.NodeID) (exists bool, err error) { + + nIDs, exists := cache.get(round) + if !exists { + if nIDs, err = cache.update(round); err != nil { + return + } + } + _, exists = nIDs.nodeSet.IDs[nodeID] + return +} + +// GetPublicKey return public key for that node: +func (cache *NodeSetCache) GetPublicKey( + nodeID types.NodeID) (key crypto.PublicKey, exists bool) { + + cache.lock.RLock() + defer cache.lock.RUnlock() + + rec, exists := cache.keyPool[nodeID] + if exists { + key = rec.pubKey + } + return +} + +// GetNodeSet returns IDs of nodes set of this round as map. +func (cache *NodeSetCache) GetNodeSet( + round uint64) (nIDs *types.NodeSet, err error) { + + IDs, exists := cache.get(round) + if !exists { + if IDs, err = cache.update(round); err != nil { + return + } + } + nIDs = IDs.nodeSet.Clone() + return +} + +// GetNotarySet returns of notary set of this round. +func (cache *NodeSetCache) GetNotarySet( + round uint64, chainID uint32) (map[types.NodeID]struct{}, error) { + IDs, err := cache.getOrUpdate(round) + if err != nil { + return nil, err + } + if chainID >= uint32(len(IDs.notarySet)) { + return nil, ErrInvalidChainID + } + return cache.cloneMap(IDs.notarySet[chainID]), nil +} + +// GetDKGSet returns of DKG set of this round. 
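NodeSetCache resolves per-round membership questions without hitting governance on every call. A usage sketch from outside package core; gov is any NodeSetCacheInterface implementation (the Governance interface declared earlier already provides the three required methods), the cache is created once with core.NewNodeSetCache(gov), and nodeID is this node's ID.

    // inNotarySet reports whether nodeID belongs to the notary set of the
    // given round and chain, using a shared cache.
    func inNotarySet(cache *core.NodeSetCache, round uint64, chainID uint32,
        nodeID types.NodeID) (bool, error) {
        notaries, err := cache.GetNotarySet(round, chainID)
        if err != nil {
            // e.g. core.ErrRoundNotReady when governance has no node set yet,
            // or core.ErrInvalidChainID for an out-of-range chain.
            return false, err
        }
        _, ok := notaries[nodeID]
        return ok, nil
    }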
+func (cache *NodeSetCache) GetDKGSet( + round uint64) (map[types.NodeID]struct{}, error) { + IDs, err := cache.getOrUpdate(round) + if err != nil { + return nil, err + } + return cache.cloneMap(IDs.dkgSet), nil +} + +func (cache *NodeSetCache) cloneMap( + nIDs map[types.NodeID]struct{}) map[types.NodeID]struct{} { + nIDsCopy := make(map[types.NodeID]struct{}, len(nIDs)) + for k := range nIDs { + nIDsCopy[k] = struct{}{} + } + return nIDsCopy +} + +func (cache *NodeSetCache) getOrUpdate(round uint64) (nIDs *sets, err error) { + s, exists := cache.get(round) + if !exists { + if s, err = cache.update(round); err != nil { + return + } + } + nIDs = s + return +} + +// update node set for that round. +// +// This cache would maintain 10 rounds before the updated round and purge +// rounds not in this range. +func (cache *NodeSetCache) update( + round uint64) (nIDs *sets, err error) { + + cache.lock.Lock() + defer cache.lock.Unlock() + + // Get the requested round. + keySet := cache.nsIntf.NodeSet(round) + if keySet == nil { + // That round is not ready yet. + err = ErrRoundNotReady + return + } + // Cache new round. + nodeSet := types.NewNodeSet() + for _, key := range keySet { + nID := types.NewNodeID(key) + nodeSet.Add(nID) + if rec, exists := cache.keyPool[nID]; exists { + rec.refCnt++ + } else { + cache.keyPool[nID] = &struct { + pubKey crypto.PublicKey + refCnt int + }{key, 1} + } + } + cfg := cache.nsIntf.Configuration(round) + crs := cache.nsIntf.CRS(round) + nIDs = &sets{ + nodeSet: nodeSet, + notarySet: make([]map[types.NodeID]struct{}, cfg.NumChains), + dkgSet: nodeSet.GetSubSet( + int(cfg.DKGSetSize), types.NewDKGSetTarget(crs)), + } + for i := range nIDs.notarySet { + nIDs.notarySet[i] = nodeSet.GetSubSet( + int(cfg.NotarySetSize), types.NewNotarySetTarget(crs, uint32(i))) + } + + cache.rounds[round] = nIDs + // Purge older rounds. + for rID, nIDs := range cache.rounds { + nodeSet := nIDs.nodeSet + if round-rID <= 5 { + continue + } + for nID := range nodeSet.IDs { + rec := cache.keyPool[nID] + if rec.refCnt--; rec.refCnt == 0 { + delete(cache.keyPool, nID) + } + } + delete(cache.rounds, rID) + } + return +} + +func (cache *NodeSetCache) get( + round uint64) (nIDs *sets, exists bool) { + + cache.lock.RLock() + defer cache.lock.RUnlock() + + nIDs, exists = cache.rounds[round] + return +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nonblocking.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nonblocking.go new file mode 100644 index 000000000..675675b2f --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nonblocking.go @@ -0,0 +1,164 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. 
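Before moving into nonblocking.go, here is a minimal sketch of how the NodeSetCache above is meant to be driven: the caller supplies any backend that satisfies NodeSetCacheInterface and then queries per-round sets through the cache. The staticGov type, its field values, and the printed output are illustrative assumptions, not part of the library.

package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus-core/common"
	"github.com/dexon-foundation/dexon-consensus-core/core"
	"github.com/dexon-foundation/dexon-consensus-core/core/crypto"
	"github.com/dexon-foundation/dexon-consensus-core/core/types"
)

// staticGov is a hypothetical fixed backend for NodeSetCache: it returns the
// same configuration, CRS, and key list for every round.
type staticGov struct {
	cfg  types.Config
	crs  common.Hash
	keys []crypto.PublicKey
}

func (g *staticGov) Configuration(round uint64) *types.Config { return &g.cfg }
func (g *staticGov) CRS(round uint64) common.Hash             { return g.crs }
func (g *staticGov) NodeSet(round uint64) []crypto.PublicKey  { return g.keys }

func main() {
	gov := &staticGov{
		cfg: types.Config{NumChains: 4, NotarySetSize: 4, DKGSetSize: 4},
		// keys would normally hold the registered public key of every node;
		// with a nil list the cache reports ErrRoundNotReady.
	}
	cache := core.NewNodeSetCache(gov)
	if notaries, err := cache.GetNotarySet(0, 0); err != nil {
		fmt.Println("lookup failed:", err)
	} else {
		fmt.Println("notary set size:", len(notaries))
	}
}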
+ +package core + +import ( + "fmt" + "sync" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +type blockConfirmedEvent struct { + block *types.Block +} + +type stronglyAckedEvent struct { + blockHash common.Hash +} + +type totalOrderingDeliveredEvent struct { + blockHashes common.Hashes + mode uint32 +} + +type blockDeliveredEvent struct { + blockHash common.Hash + result *types.FinalizationResult +} + +// nonBlocking implements these interfaces and is a decorator for +// them that makes the methods to be non-blocking. +// - Application +// - Debug +// - It also provides nonblockig for blockdb update. +type nonBlocking struct { + app Application + debug Debug + eventChan chan interface{} + events []interface{} + eventsChange *sync.Cond + running sync.WaitGroup +} + +func newNonBlocking(app Application, debug Debug) *nonBlocking { + nonBlockingModule := &nonBlocking{ + app: app, + debug: debug, + eventChan: make(chan interface{}, 6), + events: make([]interface{}, 0, 100), + eventsChange: sync.NewCond(&sync.Mutex{}), + } + go nonBlockingModule.run() + return nonBlockingModule +} + +func (nb *nonBlocking) addEvent(event interface{}) { + nb.eventsChange.L.Lock() + defer nb.eventsChange.L.Unlock() + nb.events = append(nb.events, event) + nb.eventsChange.Broadcast() +} + +func (nb *nonBlocking) run() { + // This go routine consume the first event from events and call the + // corresponding methods of Application/Debug/blockdb. + for { + var event interface{} + func() { + nb.eventsChange.L.Lock() + defer nb.eventsChange.L.Unlock() + for len(nb.events) == 0 { + nb.eventsChange.Wait() + } + event = nb.events[0] + nb.events = nb.events[1:] + nb.running.Add(1) + }() + switch e := event.(type) { + case stronglyAckedEvent: + nb.debug.StronglyAcked(e.blockHash) + case blockConfirmedEvent: + nb.app.BlockConfirmed(*e.block) + case totalOrderingDeliveredEvent: + nb.debug.TotalOrderingDelivered(e.blockHashes, e.mode) + case blockDeliveredEvent: + nb.app.BlockDelivered(e.blockHash, *e.result) + default: + fmt.Printf("Unknown event %v.", e) + } + nb.running.Done() + nb.eventsChange.Broadcast() + } +} + +// wait will wait for all event in events finishes. +func (nb *nonBlocking) wait() { + nb.eventsChange.L.Lock() + defer nb.eventsChange.L.Unlock() + for len(nb.events) > 0 { + nb.eventsChange.Wait() + } + nb.running.Wait() +} + +// PreparePayload cannot be non-blocking. +func (nb *nonBlocking) PreparePayload(position types.Position) ([]byte, error) { + return nb.app.PreparePayload(position) +} + +// PrepareWitness cannot be non-blocking. +func (nb *nonBlocking) PrepareWitness(height uint64) (types.Witness, error) { + return nb.app.PrepareWitness(height) +} + +// VerifyBlock cannot be non-blocking. +func (nb *nonBlocking) VerifyBlock(block *types.Block) bool { + return nb.app.VerifyBlock(block) +} + +// BlockConfirmed is called when a block is confirmed and added to lattice. +func (nb *nonBlocking) BlockConfirmed(block types.Block) { + nb.addEvent(blockConfirmedEvent{&block}) +} + +// StronglyAcked is called when a block is strongly acked. +func (nb *nonBlocking) StronglyAcked(blockHash common.Hash) { + if nb.debug != nil { + nb.addEvent(stronglyAckedEvent{blockHash}) + } +} + +// TotalOrderingDelivered is called when the total ordering algorithm deliver +// a set of block. 
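As a quick illustration of the decorator above, a package-internal sketch of the intended call pattern (for example from a test in package core); the app value is assumed to satisfy the Application interface whose methods are forwarded here, and is not taken from the library:

// A nil Debug is safe: debug-only events are simply skipped.
nb := newNonBlocking(app, nil)
nb.BlockConfirmed(types.Block{}) // returns immediately; the call is queued as an event
nb.wait()                        // blocks until the background goroutine has drained the queue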
+func (nb *nonBlocking) TotalOrderingDelivered( + blockHashes common.Hashes, mode uint32) { + if nb.debug != nil { + nb.addEvent(totalOrderingDeliveredEvent{blockHashes, mode}) + } +} + +// BlockDelivered is called when a block is add to the compaction chain. +func (nb *nonBlocking) BlockDelivered( + blockHash common.Hash, result types.FinalizationResult) { + nb.addEvent(blockDeliveredEvent{ + blockHash: blockHash, + result: &result, + }) +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/round-based-config.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/round-based-config.go new file mode 100644 index 000000000..24ade494f --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/round-based-config.go @@ -0,0 +1,50 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "time" + + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +type roundBasedConfig struct { + roundID uint64 + // roundBeginTime is the beginning of round, as local time. + roundBeginTime time.Time + roundInterval time.Duration + // roundEndTime is a cache for begin + interval. + roundEndTime time.Time +} + +func (config *roundBasedConfig) setupRoundBasedFields( + roundID uint64, cfg *types.Config) { + config.roundID = roundID + config.roundInterval = cfg.RoundInterval +} + +func (config *roundBasedConfig) setRoundBeginTime(begin time.Time) { + config.roundBeginTime = begin + config.roundEndTime = begin.Add(config.roundInterval) +} + +// isValidLastBlock checks if a block is a valid last block of this round. +func (config *roundBasedConfig) isValidLastBlock(b *types.Block) bool { + return b.Position.Round == config.roundID && + b.Timestamp.After(config.roundEndTime) +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/ticker.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/ticker.go new file mode 100644 index 000000000..0d2e433ff --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/ticker.go @@ -0,0 +1,77 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import "time" + +// TickerType is the type of ticker. +type TickerType int + +// TickerType enum. +const ( + TickerBA TickerType = iota + TickerDKG + TickerCRS +) + +// defaultTicker is a wrapper to implement ticker interface based on +// time.Ticker. +type defaultTicker struct { + ticker *time.Ticker +} + +// newDefaultTicker constructs an defaultTicker instance by giving an interval. +func newDefaultTicker(lambda time.Duration) *defaultTicker { + return &defaultTicker{ticker: time.NewTicker(lambda)} +} + +// Tick implements Tick method of ticker interface. +func (t *defaultTicker) Tick() <-chan time.Time { + return t.ticker.C +} + +// Stop implements Stop method of ticker interface. +func (t *defaultTicker) Stop() { + t.ticker.Stop() +} + +// newTicker is a helper to setup a ticker by giving an Governance. If +// the governace object implements a ticker generator, a ticker from that +// generator would be returned, else constructs a default one. +func newTicker(gov Governance, round uint64, tickerType TickerType) (t Ticker) { + type tickerGenerator interface { + NewTicker(TickerType) Ticker + } + + if gen, ok := gov.(tickerGenerator); ok { + t = gen.NewTicker(tickerType) + } + if t == nil { + var duration time.Duration + switch tickerType { + case TickerBA: + duration = gov.Configuration(round).LambdaBA + case TickerDKG: + duration = gov.Configuration(round).LambdaDKG + case TickerCRS: + duration = gov.Configuration(round).RoundInterval / 2 + } + t = newDefaultTicker(duration) + } + return +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering.go new file mode 100644 index 000000000..ec9e643f0 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering.go @@ -0,0 +1,1329 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "errors" + "math" + "sort" + "sync" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +const ( + infinity uint64 = math.MaxUint64 +) + +const ( + // TotalOrderingModeError returns mode error. + TotalOrderingModeError uint32 = iota + // TotalOrderingModeNormal returns mode normal. + TotalOrderingModeNormal + // TotalOrderingModeEarly returns mode early. + TotalOrderingModeEarly + // TotalOrderingModeFlush returns mode flush. 
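The newTicker helper in ticker.go above resolves the ticker through an optional interface assertion, so a Governance implementation can supply its own clock simply by exposing a NewTicker method. A hedged, package-internal sketch of that hook (fixedGov and the 100ms interval are made up for illustration):

// fixedGov wraps an existing Governance and additionally satisfies the
// tickerGenerator interface asserted inside newTicker.
type fixedGov struct {
	Governance
}

func (g *fixedGov) NewTicker(TickerType) Ticker {
	// Ignore the ticker type and always tick every 100ms.
	return newDefaultTicker(100 * time.Millisecond)
}

// newTicker(&fixedGov{gov}, round, TickerBA) would then return this fixed
// ticker instead of one derived from LambdaBA in the round's configuration.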
+ TotalOrderingModeFlush +) + +var ( + // ErrNotValidDAG would be reported when block subbmitted to totalOrdering + // didn't form a DAG. + ErrNotValidDAG = errors.New("not a valid dag") + // ErrFutureRoundDelivered means some blocks from later rounds are + // delivered, this means program error. + ErrFutureRoundDelivered = errors.New("future round delivered") + // ErrBlockFromPastRound means we receive some block from past round. + ErrBlockFromPastRound = errors.New("block from past round") + // ErrTotalOrderingHangs means total ordering hangs somewhere. + ErrTotalOrderingHangs = errors.New("total ordering hangs") + // ErrForwardAck means a block acking some blocks from newer round. + ErrForwardAck = errors.New("forward ack") + // ErrUnexpected means general (I'm lazy) errors. + ErrUnexpected = errors.New("unexpected") +) + +// totalOrderingConfig is the configuration for total ordering. +type totalOrderingConfig struct { + roundBasedConfig + // k represents the k in 'k-level total ordering'. + // In short, only block height equals to (global minimum height + k) + // would be taken into consideration. + k uint64 + // phi is a const to control how strong the leading preceding block + // should be. + phi uint64 + // chainNum is the count of chains. + numChains uint32 + // Is round cutting required? + isFlushRequired bool +} + +func (config *totalOrderingConfig) fromConfig( + roundID uint64, cfg *types.Config) { + config.k = uint64(cfg.K) + config.numChains = cfg.NumChains + config.phi = uint64(float32(cfg.NumChains-1)*cfg.PhiRatio + 1) + config.setupRoundBasedFields(roundID, cfg) +} + +func newGenesisTotalOrderingConfig( + dMoment time.Time, config *types.Config) *totalOrderingConfig { + c := &totalOrderingConfig{} + c.fromConfig(0, config) + c.setRoundBeginTime(dMoment) + return c +} + +func newTotalOrderingConfig( + prev *totalOrderingConfig, cur *types.Config) *totalOrderingConfig { + c := &totalOrderingConfig{} + c.fromConfig(prev.roundID+1, cur) + c.setRoundBeginTime(prev.roundEndTime) + prev.isFlushRequired = c.k != prev.k || + c.phi != prev.phi || + c.numChains != prev.numChains + return c +} + +// totalOrderingWinRecord caches which chains this candidate +// wins another one based on their height vector. +type totalOrderingWinRecord struct { + wins []int8 + count uint +} + +func (rec *totalOrderingWinRecord) reset() { + rec.count = 0 + for idx := range rec.wins { + rec.wins[idx] = 0 + } +} + +func newTotalOrderingWinRecord(numChains uint32) ( + rec *totalOrderingWinRecord) { + rec = &totalOrderingWinRecord{} + rec.reset() + rec.wins = make([]int8, numChains) + return +} + +// grade implements the 'grade' potential function described in white paper. +func (rec *totalOrderingWinRecord) grade( + numChains uint32, phi uint64, globalAnsLength uint64) int { + if uint64(rec.count) >= phi { + return 1 + } else if uint64(rec.count) < phi-uint64(numChains)+globalAnsLength { + return 0 + } else { + return -1 + } +} + +// totalOrderingHeightRecord records two things: +// - the minimum heiht of block from that chain acking this block. +// - the count of blocks from that chain acking this block. +type totalOrderingHeightRecord struct{ minHeight, count uint64 } + +// totalOrderingObjectCache caches objects for reuse. +// The target object is map because: +// - reuse map would prevent it grows during usage, when map grows, +// hashes of key would be recaculated, bucket reallocated, and values +// are copied. 
+// However, to reuse a map, we have no easy way to erase its content but +// iterating its keys and delete corresponding values. +type totalOrderingObjectCache struct { + ackedStatus [][]*totalOrderingHeightRecord + heightVectors [][]uint64 + winRecordContainers [][]*totalOrderingWinRecord + ackedVectors []map[common.Hash]struct{} + winRecordPool sync.Pool + numChains uint32 +} + +// newTotalOrderingObjectCache constructs an totalOrderingObjectCache +// instance. +func newTotalOrderingObjectCache(numChains uint32) *totalOrderingObjectCache { + return &totalOrderingObjectCache{ + winRecordPool: sync.Pool{ + New: func() interface{} { + return newTotalOrderingWinRecord(numChains) + }, + }, + numChains: numChains, + } +} + +// resize makes sure internal storage of totalOrdering instance can handle +// maximum possible numChains in future configs. +func (cache *totalOrderingObjectCache) resize(numChains uint32) { + // Basically, everything in cache needs to be cleaned. + if cache.numChains >= numChains { + return + } + cache.ackedStatus = nil + cache.heightVectors = nil + cache.winRecordContainers = nil + cache.ackedVectors = nil + cache.numChains = numChains + cache.winRecordPool = sync.Pool{ + New: func() interface{} { + return newTotalOrderingWinRecord(numChains) + }, + } +} + +// requestAckedStatus requests a structure to record acking status of one +// candidate (or a global view of acking status of pending set). +func (cache *totalOrderingObjectCache) requestAckedStatus() ( + acked []*totalOrderingHeightRecord) { + if len(cache.ackedStatus) == 0 { + acked = make([]*totalOrderingHeightRecord, cache.numChains) + for idx := range acked { + acked[idx] = &totalOrderingHeightRecord{count: 0} + } + } else { + acked, cache.ackedStatus = + cache.ackedStatus[len(cache.ackedStatus)-1], + cache.ackedStatus[:len(cache.ackedStatus)-1] + // Reset acked status. + for idx := range acked { + acked[idx].count = 0 + } + } + return +} + +// recycleAckedStatys recycles the structure to record acking status. +func (cache *totalOrderingObjectCache) recycleAckedStatus( + acked []*totalOrderingHeightRecord) { + cache.ackedStatus = append(cache.ackedStatus, acked) +} + +// requestWinRecord requests an totalOrderingWinRecord instance. +func (cache *totalOrderingObjectCache) requestWinRecord() ( + win *totalOrderingWinRecord) { + win = cache.winRecordPool.Get().(*totalOrderingWinRecord) + win.reset() + return +} + +// recycleWinRecord recycles an totalOrderingWinRecord instance. +func (cache *totalOrderingObjectCache) recycleWinRecord( + win *totalOrderingWinRecord) { + if win == nil { + return + } + cache.winRecordPool.Put(win) +} + +// requestHeightVector requests a structure to record acking heights +// of one candidate. +func (cache *totalOrderingObjectCache) requestHeightVector() (hv []uint64) { + if len(cache.heightVectors) == 0 { + hv = make([]uint64, cache.numChains) + } else { + hv, cache.heightVectors = + cache.heightVectors[len(cache.heightVectors)-1], + cache.heightVectors[:len(cache.heightVectors)-1] + } + for idx := range hv { + hv[idx] = infinity + } + return +} + +// recycleHeightVector recycles an instance to record acking heights +// of one candidate. +func (cache *totalOrderingObjectCache) recycleHeightVector(hv []uint64) { + cache.heightVectors = append(cache.heightVectors, hv) +} + +// requestWinRecordContainer requests a map of totalOrderingWinRecord. 
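The request/recycle pairs above all follow one pattern: request hands out a reset object (reusing a recycled one when available) and recycle returns it for later computations. A package-internal sketch of that life cycle (the chain count and stored value are arbitrary):

cache := newTotalOrderingObjectCache(4)
hv := cache.requestHeightVector() // fresh or recycled; every entry reset to infinity
hv[0] = 42                        // ...used while evaluating one candidate...
cache.recycleHeightVector(hv)     // handed back; a later request may return this same slice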
+func (cache *totalOrderingObjectCache) requestWinRecordContainer() ( + con []*totalOrderingWinRecord) { + if len(cache.winRecordContainers) == 0 { + con = make([]*totalOrderingWinRecord, cache.numChains) + } else { + con, cache.winRecordContainers = + cache.winRecordContainers[len(cache.winRecordContainers)-1], + cache.winRecordContainers[:len(cache.winRecordContainers)-1] + for idx := range con { + con[idx] = nil + } + } + return +} + +// recycleWinRecordContainer recycles a map of totalOrderingWinRecord. +func (cache *totalOrderingObjectCache) recycleWinRecordContainer( + con []*totalOrderingWinRecord) { + cache.winRecordContainers = append(cache.winRecordContainers, con) +} + +// requestAckedVector requests an acked vector instance. +func (cache *totalOrderingObjectCache) requestAckedVector() ( + acked map[common.Hash]struct{}) { + if len(cache.ackedVectors) == 0 { + acked = make(map[common.Hash]struct{}) + } else { + acked, cache.ackedVectors = + cache.ackedVectors[len(cache.ackedVectors)-1], + cache.ackedVectors[:len(cache.ackedVectors)-1] + for k := range acked { + delete(acked, k) + } + } + return +} + +// recycleAckedVector recycles an acked vector instance. +func (cache *totalOrderingObjectCache) recycleAckedVector( + acked map[common.Hash]struct{}) { + if acked == nil { + return + } + cache.ackedVectors = append(cache.ackedVectors, acked) +} + +// totalOrderingCandidateInfo describes proceeding status for one candidate, +// including: +// - acked status as height records, which could keep 'how many blocks from +// one chain acking this candidate. +// - cached height vector, which valid height based on K-level used for +// comparison in 'grade' function. +// - cached result of grade function to other candidates. +// +// Height Record: +// When block A acks block B, all blocks proposed from the same proposer +// as block A with higher height would also acks block B. Therefore, +// we just need to record: +// - the minimum height of acking block from that proposer +// - count of acking blocks from that proposer +// to repsent the acking status for block A. +type totalOrderingCandidateInfo struct { + ackedStatus []*totalOrderingHeightRecord + cachedHeightVector []uint64 + winRecords []*totalOrderingWinRecord + hash common.Hash +} + +// newTotalOrderingCandidateInfo constructs an totalOrderingCandidateInfo +// instance. +func newTotalOrderingCandidateInfo( + candidateHash common.Hash, + objCache *totalOrderingObjectCache) *totalOrderingCandidateInfo { + return &totalOrderingCandidateInfo{ + ackedStatus: objCache.requestAckedStatus(), + winRecords: objCache.requestWinRecordContainer(), + hash: candidateHash, + } +} + +// clean clear information related to another candidate, which should be called +// when that candidate is selected as deliver set. +func (v *totalOrderingCandidateInfo) clean(otherCandidateChainID uint32) { + v.winRecords[otherCandidateChainID] = nil +} + +// recycle objects for later usage, this eases the loading of +// golangs' GC. +func (v *totalOrderingCandidateInfo) recycle( + objCache *totalOrderingObjectCache) { + if v.winRecords != nil { + for _, win := range v.winRecords { + objCache.recycleWinRecord(win) + } + objCache.recycleWinRecordContainer(v.winRecords) + } + if v.cachedHeightVector != nil { + objCache.recycleHeightVector(v.cachedHeightVector) + } + objCache.recycleAckedStatus(v.ackedStatus) +} + +// addBlock would update totalOrderingCandidateInfo, it's caller's duty +// to make sure the input block acutally acking the target block. 
+func (v *totalOrderingCandidateInfo) addBlock(b *types.Block) (err error) { + rec := v.ackedStatus[b.Position.ChainID] + if rec.count == 0 { + rec.minHeight = b.Position.Height + rec.count = 1 + } else { + if b.Position.Height < rec.minHeight { + err = ErrNotValidDAG + return + } + rec.count++ + } + return +} + +// getAckingNodeSetLength would generate the Acking Node Set and return its +// length. Only block height larger than +// +// global minimum height + k +// +// would be taken into consideration, ex. +// +// For some chain X: +// - the global minimum acking height = 1, +// - k = 1 +// then only block height >= 2 would be added to acking node set. +func (v *totalOrderingCandidateInfo) getAckingNodeSetLength( + global *totalOrderingCandidateInfo, + k uint64, + numChains uint32) (count uint64) { + var rec *totalOrderingHeightRecord + for idx, gRec := range global.ackedStatus[:numChains] { + if gRec.count == 0 { + continue + } + rec = v.ackedStatus[idx] + if rec.count == 0 { + continue + } + // This line would check if these two ranges would overlap: + // - (global minimum height + k, infinity) + // - (local minimum height, local minimum height + count - 1) + if rec.minHeight+rec.count-1 >= gRec.minHeight+k { + count++ + } + } + return +} + +// updateAckingHeightVector would cached acking height vector. +// +// Only block height equals to (global minimum block height + k) would be +// taken into consideration. +func (v *totalOrderingCandidateInfo) updateAckingHeightVector( + global *totalOrderingCandidateInfo, + k uint64, + dirtyChainIDs []int, + objCache *totalOrderingObjectCache) { + var ( + idx int + gRec, rec *totalOrderingHeightRecord + ) + // The reason not to merge the two loops is the iteration over map + // is expensive when chain count is large, iterating over dirty + // chains is cheaper. + // TODO(mission): merge the code in this if/else if the performance won't be + // downgraded when adding a function for the shared part. + if v.cachedHeightVector == nil { + // Generate height vector from scratch. + v.cachedHeightVector = objCache.requestHeightVector() + for idx, gRec = range global.ackedStatus { + if gRec.count <= k { + continue + } + rec = v.ackedStatus[idx] + if rec.count == 0 { + v.cachedHeightVector[idx] = infinity + } else if rec.minHeight <= gRec.minHeight+k { + // This check is sufficient to make sure the block height: + // + // gRec.minHeight + k + // + // would be included in this totalOrderingCandidateInfo. + v.cachedHeightVector[idx] = gRec.minHeight + k + } else { + v.cachedHeightVector[idx] = infinity + } + } + } else { + // Return the cached one, only update dirty fields. + for _, idx = range dirtyChainIDs { + gRec = global.ackedStatus[idx] + if gRec.count == 0 || gRec.count <= k { + v.cachedHeightVector[idx] = infinity + continue + } + rec = v.ackedStatus[idx] + if rec.count == 0 { + v.cachedHeightVector[idx] = infinity + } else if rec.minHeight <= gRec.minHeight+k { + v.cachedHeightVector[idx] = gRec.minHeight + k + } else { + v.cachedHeightVector[idx] = infinity + } + } + } + return +} + +// updateWinRecord setup win records between two candidates. +func (v *totalOrderingCandidateInfo) updateWinRecord( + otherChainID uint32, + other *totalOrderingCandidateInfo, + dirtyChainIDs []int, + objCache *totalOrderingObjectCache, + numChains uint32) { + var ( + idx int + height uint64 + ) + // The reason not to merge the two loops is the iteration over map + // is expensive when chain count is large, iterating over dirty + // chains is cheaper. 
+ // TODO(mission): merge the code in this if/else if add a function won't + // affect the performance. + win := v.winRecords[otherChainID] + if win == nil { + win = objCache.requestWinRecord() + v.winRecords[otherChainID] = win + for idx, height = range v.cachedHeightVector[:numChains] { + if height == infinity { + continue + } + if other.cachedHeightVector[idx] == infinity { + win.wins[idx] = 1 + win.count++ + } + } + } else { + for _, idx = range dirtyChainIDs { + if v.cachedHeightVector[idx] == infinity { + if win.wins[idx] == 1 { + win.wins[idx] = 0 + win.count-- + } + continue + } + if other.cachedHeightVector[idx] == infinity { + if win.wins[idx] == 0 { + win.wins[idx] = 1 + win.count++ + } + } else { + if win.wins[idx] == 1 { + win.wins[idx] = 0 + win.count-- + } + } + } + } +} + +// totalOrderingBreakpoint is a record to store the height discontinuity +// on a chain. +type totalOrderingBreakpoint struct { + roundID uint64 + // height of last block in previous round. + lastHeight uint64 +} + +// totalOrderingGroupVector keeps global status of current pending set. +type totalOrderingGlobalVector struct { + // blocks stores all blocks grouped by their proposers and + // sorted by their block height. + // + // TODO(mission): the way we use this slice would make it reallocate + // frequently. + blocks [][]*types.Block + + // breakpoints caches rounds for chains that blocks' height on them are + // not continuous. Ex. + // ChainID Round Height + // 1 0 0 + // 1 0 1 + // 1 1 2 + // 1 1 3 + // 1 1 4 + // 1 3 0 <- a breakpoint for round 3 would be cached + // for chain 1 as (roundID=1, lastHeight=4). + breakpoints [][]*totalOrderingBreakpoint + + // curRound caches the last round ID used to purge breakpoints. + curRound uint64 + + // tips records the last seen block for each chain. + tips []*types.Block + + // cachedCandidateInfo is an totalOrderingCandidateInfo instance, + // which is just used for actual candidates to calculate height vector. + cachedCandidateInfo *totalOrderingCandidateInfo +} + +func newTotalOrderingGlobalVector(numChains uint32) *totalOrderingGlobalVector { + return &totalOrderingGlobalVector{ + blocks: make([][]*types.Block, numChains), + tips: make([]*types.Block, numChains), + breakpoints: make([][]*totalOrderingBreakpoint, numChains), + } +} + +func (global *totalOrderingGlobalVector) resize(numChains uint32) { + if len(global.blocks) >= int(numChains) { + return + } + // Resize blocks. + newBlocks := make([][]*types.Block, numChains) + copy(newBlocks, global.blocks) + global.blocks = newBlocks + // Resize breakpoints. + newBreakPoints := make([][]*totalOrderingBreakpoint, numChains) + copy(newBreakPoints, global.breakpoints) + global.breakpoints = newBreakPoints + // Resize tips. 
+ newTips := make([]*types.Block, numChains) + copy(newTips, global.tips) + global.tips = newTips +} + +func (global *totalOrderingGlobalVector) switchRound(roundID uint64) { + if global.curRound+1 != roundID { + panic(ErrUnexpected) + } + global.curRound = roundID + for chainID, bs := range global.breakpoints { + if len(bs) == 0 { + continue + } + if bs[0].roundID == roundID { + global.breakpoints[chainID] = bs[1:] + } + } +} + +func (global *totalOrderingGlobalVector) prepareHeightRecord( + candidate *types.Block, + info *totalOrderingCandidateInfo, + acked map[common.Hash]struct{}) { + var ( + chainID = candidate.Position.ChainID + breakpoints = global.breakpoints[chainID] + breakpoint *totalOrderingBreakpoint + rec *totalOrderingHeightRecord + ) + // Setup height record for own chain. + rec = &totalOrderingHeightRecord{ + minHeight: candidate.Position.Height, + } + if len(breakpoints) == 0 { + rec.count = uint64(len(global.blocks[chainID])) + } else { + rec.count = breakpoints[0].lastHeight - candidate.Position.Height + 1 + } + info.ackedStatus[chainID] = rec + if acked == nil { + return + } + for idx, blocks := range global.blocks { + if idx == int(candidate.Position.ChainID) { + continue + } + breakpoint = nil + if len(global.breakpoints[idx]) > 0 { + breakpoint = global.breakpoints[idx][0] + } + for i, b := range blocks { + if breakpoint != nil && b.Position.Round >= breakpoint.roundID { + break + } + if _, acked := acked[b.Hash]; !acked { + continue + } + // If this block acks this candidate, all newer blocks + // from the same chain also 'indirect' acks it. + rec = info.ackedStatus[idx] + rec.minHeight = b.Position.Height + if breakpoint == nil { + rec.count = uint64(len(blocks) - i) + } else { + rec.count = breakpoint.lastHeight - b.Position.Height + 1 + } + break + } + } + +} + +func (global *totalOrderingGlobalVector) addBlock( + b *types.Block) (pos int, pending bool, err error) { + curPosition := b.Position + tip := global.tips[curPosition.ChainID] + pos = len(global.blocks[curPosition.ChainID]) + if tip != nil { + // Perform light weight sanity check based on tip. + lastPosition := tip.Position + if lastPosition.Round > curPosition.Round { + err = ErrNotValidDAG + return + } + if DiffUint64(lastPosition.Round, curPosition.Round) > 1 { + if curPosition.Height != 0 { + err = ErrNotValidDAG + return + } + // Add breakpoint. + global.breakpoints[curPosition.ChainID] = append( + global.breakpoints[curPosition.ChainID], + &totalOrderingBreakpoint{ + roundID: curPosition.Round, + lastHeight: lastPosition.Height, + }) + } else { + if curPosition.Height != lastPosition.Height+1 { + err = ErrNotValidDAG + return + } + } + } else { + if curPosition.Round < global.curRound { + err = ErrBlockFromPastRound + return + } + if curPosition.Round > global.curRound { + // Add breakpoint. + global.breakpoints[curPosition.ChainID] = append( + global.breakpoints[curPosition.ChainID], + &totalOrderingBreakpoint{ + roundID: curPosition.Round, + lastHeight: 0, + }) + } + } + breakpoints := global.breakpoints[b.Position.ChainID] + pending = len(breakpoints) > 0 && breakpoints[0].roundID <= b.Position.Round + global.blocks[b.Position.ChainID] = append( + global.blocks[b.Position.ChainID], b) + global.tips[b.Position.ChainID] = b + return +} + +// updateCandidateInfo udpate cached candidate info. 
+func (global *totalOrderingGlobalVector) updateCandidateInfo( + dirtyChainIDs []int, objCache *totalOrderingObjectCache) { + var ( + idx int + blocks []*types.Block + block *types.Block + info *totalOrderingCandidateInfo + rec *totalOrderingHeightRecord + breakpoint *totalOrderingBreakpoint + ) + if global.cachedCandidateInfo == nil { + info = newTotalOrderingCandidateInfo(common.Hash{}, objCache) + for idx, blocks = range global.blocks { + if len(blocks) == 0 { + continue + } + rec = info.ackedStatus[idx] + if len(global.breakpoints[idx]) > 0 { + breakpoint = global.breakpoints[idx][0] + block = blocks[0] + if block.Position.Round >= breakpoint.roundID { + continue + } + rec.minHeight = block.Position.Height + rec.count = breakpoint.lastHeight - block.Position.Height + 1 + } else { + rec.minHeight = blocks[0].Position.Height + rec.count = uint64(len(blocks)) + } + } + global.cachedCandidateInfo = info + } else { + info = global.cachedCandidateInfo + for _, idx = range dirtyChainIDs { + blocks = global.blocks[idx] + if len(blocks) == 0 { + info.ackedStatus[idx].count = 0 + continue + } + rec = info.ackedStatus[idx] + if len(global.breakpoints[idx]) > 0 { + breakpoint = global.breakpoints[idx][0] + block = blocks[0] + if block.Position.Round >= breakpoint.roundID { + continue + } + rec.minHeight = block.Position.Height + rec.count = breakpoint.lastHeight - block.Position.Height + 1 + } else { + rec.minHeight = blocks[0].Position.Height + rec.count = uint64(len(blocks)) + } + } + } + return +} + +// totalOrdering represent a process unit to handle total ordering +// for blocks. +type totalOrdering struct { + // pendings stores blocks awaiting to be ordered. + pendings map[common.Hash]*types.Block + + // The round of config used when performing total ordering. + curRound uint64 + + // duringFlush is a flag to switch the flush mode and normal mode. + duringFlush bool + + // flushReadyChains checks if the last block of that chain arrived. Once + // last blocks from all chains in current config are arrived, we can + // perform flush. + flushReadyChains map[uint32]struct{} + + // flush is a map to record which blocks are already flushed. + flushed map[uint32]struct{} + + // globalVector group all pending blocks by proposers and + // sort them by block height. This structure is helpful when: + // + // - build global height vector + // - picking candidates next round + globalVector *totalOrderingGlobalVector + + // candidates caches result of potential function during generating + // preceding sets. + candidates []*totalOrderingCandidateInfo + + // acked cache the 'block A acked by block B' relation by + // keeping a record in acked[A.Hash][B.Hash] + acked map[common.Hash]map[common.Hash]struct{} + + // dirtyChainIDs records which chainID that should be updated + // for all cached status (win record, acking status). + dirtyChainIDs []int + + // objCache caches allocated objects, like map. + objCache *totalOrderingObjectCache + + // candidateChainMapping keeps a mapping from candidate's hash to + // their chain IDs. + candidateChainMapping map[uint32]common.Hash + + // candidateChainIDs records chain ID of all candidates. + candidateChainIDs []uint32 + + // configs keeps configuration for each round in continuous way. + configs []*totalOrderingConfig +} + +// newTotalOrdering constructs an totalOrdering instance. 
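To make the wiring of these fields concrete, a package-internal sketch of constructing the module via newTotalOrdering (defined next); the configuration values are illustrative only:

genesisCfg := newGenesisTotalOrderingConfig(time.Now().UTC(), &types.Config{
	K:             0,
	NumChains:     4,
	PhiRatio:      0.667,
	RoundInterval: time.Hour,
})
to := newTotalOrdering(genesisCfg)
_ = to // confirmed blocks are then fed in one at a time via to.processBlock(b)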
+func newTotalOrdering(config *totalOrderingConfig) *totalOrdering { + globalVector := newTotalOrderingGlobalVector(config.numChains) + objCache := newTotalOrderingObjectCache(config.numChains) + candidates := make([]*totalOrderingCandidateInfo, config.numChains) + to := &totalOrdering{ + pendings: make(map[common.Hash]*types.Block), + globalVector: globalVector, + dirtyChainIDs: make([]int, 0, config.numChains), + acked: make(map[common.Hash]map[common.Hash]struct{}), + objCache: objCache, + candidateChainMapping: make(map[uint32]common.Hash), + candidates: candidates, + candidateChainIDs: make([]uint32, 0, config.numChains), + curRound: config.roundID, + } + to.configs = []*totalOrderingConfig{config} + return to +} + +// appendConfig add new configs for upcoming rounds. If you add a config for +// round R, next time you can only add the config for round R+1. +func (to *totalOrdering) appendConfig( + round uint64, config *types.Config) error { + if round != uint64(len(to.configs))+to.configs[0].roundID { + return ErrRoundNotIncreasing + } + to.configs = append( + to.configs, + newTotalOrderingConfig(to.configs[len(to.configs)-1], config)) + // Resize internal structures. + to.globalVector.resize(config.NumChains) + to.objCache.resize(config.NumChains) + if int(config.NumChains) > len(to.candidates) { + newCandidates := make([]*totalOrderingCandidateInfo, config.NumChains) + copy(newCandidates, to.candidates) + to.candidates = newCandidates + } + return nil +} + +func (to *totalOrdering) switchRound() { + to.curRound++ + to.globalVector.switchRound(to.curRound) +} + +// buildBlockRelation populates the acked according their acking relationships. +// This function would update all blocks implcitly acked by input block +// recursively. +func (to *totalOrdering) buildBlockRelation(b *types.Block) { + var ( + curBlock, nextBlock *types.Block + ack common.Hash + acked map[common.Hash]struct{} + exists, alreadyPopulated bool + toCheck = []*types.Block{b} + ) + for { + if len(toCheck) == 0 { + break + } + curBlock, toCheck = toCheck[len(toCheck)-1], toCheck[:len(toCheck)-1] + if curBlock.Position.Round > b.Position.Round { + // It's illegal for a block to acking some block from future + // round, this rule should be promised before delivering to + // total ordering. + panic(ErrForwardAck) + } + for _, ack = range curBlock.Acks { + if acked, exists = to.acked[ack]; !exists { + acked = to.objCache.requestAckedVector() + to.acked[ack] = acked + } + // This means we've walked this block already. + if _, alreadyPopulated = acked[b.Hash]; alreadyPopulated { + continue + } + acked[b.Hash] = struct{}{} + // See if we need to go forward. + if nextBlock, exists = to.pendings[ack]; !exists { + continue + } else { + toCheck = append(toCheck, nextBlock) + } + } + } +} + +// clean a block from working set. This behaviour would prevent +// our memory usage growing infinity. +func (to *totalOrdering) clean(b *types.Block) { + var ( + h = b.Hash + chainID = b.Position.ChainID + ) + to.objCache.recycleAckedVector(to.acked[h]) + delete(to.acked, h) + delete(to.pendings, h) + to.candidates[chainID].recycle(to.objCache) + to.candidates[chainID] = nil + delete(to.candidateChainMapping, chainID) + // Remove this candidate from candidate IDs. + to.candidateChainIDs = + removeFromSortedUint32Slice(to.candidateChainIDs, chainID) + // Clear records of this candidate from other candidates. 
+ for _, idx := range to.candidateChainIDs { + to.candidates[idx].clean(chainID) + } +} + +// updateVectors is a helper function to update all cached vectors. +func (to *totalOrdering) updateVectors(b *types.Block) (pos int, err error) { + var ( + candidateHash common.Hash + chainID uint32 + acked bool + pending bool + ) + // Update global height vector + if pos, pending, err = to.globalVector.addBlock(b); err != nil { + return + } + if to.duringFlush { + // It makes no sense to calculate potential functions of total ordering + // when flushing would be happened. + return + } + if pending { + // The chain of this block contains breakpoints, which means their + // height are not continuous. This implementation of DEXON total + // ordering algorithm assumes the height of blocks in working set should + // be continuous. + // + // To workaround this issue, when block arrived after breakpoints, + // their information would not be contributed to current working set. + // This mechanism works because we switch rounds by flushing and + // reset the whole working set. + return + } + // Update acking status of candidates. + for chainID, candidateHash = range to.candidateChainMapping { + if _, acked = to.acked[candidateHash][b.Hash]; !acked { + continue + } + if err = to.candidates[chainID].addBlock(b); err != nil { + return + } + } + return +} + +// prepareCandidate is a helper function to +// build totalOrderingCandidateInfo for new candidate. +func (to *totalOrdering) prepareCandidate(candidate *types.Block) { + var ( + info = newTotalOrderingCandidateInfo(candidate.Hash, to.objCache) + chainID = candidate.Position.ChainID + ) + to.candidates[chainID] = info + to.candidateChainMapping[chainID] = candidate.Hash + // Add index to slot to allocated list, make sure the modified list sorted. + to.candidateChainIDs = append(to.candidateChainIDs, chainID) + sort.Slice(to.candidateChainIDs, func(i, j int) bool { + return to.candidateChainIDs[i] < to.candidateChainIDs[j] + }) + to.globalVector.prepareHeightRecord( + candidate, info, to.acked[candidate.Hash]) + return +} + +// isAckOnlyPrecedings is a helper function to check if a block +// only contain acks to delivered blocks. +func (to *totalOrdering) isAckOnlyPrecedings(b *types.Block) bool { + for _, ack := range b.Acks { + if _, pending := to.pendings[ack]; pending { + return false + } + } + return true +} + +// output is a helper function to finish the delivery of +// deliverable preceding set. +func (to *totalOrdering) output( + precedings map[common.Hash]struct{}, + numChains uint32) (ret []*types.Block) { + for p := range precedings { + // Remove the first element from corresponding blockVector. + b := to.pendings[p] + chainID := b.Position.ChainID + // TODO(mission): This way to use slice makes it reallocate frequently. + to.globalVector.blocks[int(chainID)] = + to.globalVector.blocks[int(chainID)][1:] + ret = append(ret, b) + // Remove block relations. + to.clean(b) + to.dirtyChainIDs = append(to.dirtyChainIDs, int(chainID)) + } + sort.Sort(types.ByHash(ret)) + // Find new candidates from tip of globalVector of each chain. + // The complexity here is O(N^2logN). + // TODO(mission): only those tips that acking some blocks in + // the devliered set should be checked. This + // improvment related to the latency introduced by K. 
+ for chainID, blocks := range to.globalVector.blocks[:numChains] { + if len(blocks) == 0 { + continue + } + if _, picked := to.candidateChainMapping[uint32(chainID)]; picked { + continue + } + if !to.isAckOnlyPrecedings(blocks[0]) { + continue + } + // Build totalOrderingCandidateInfo for new candidate. + to.prepareCandidate(blocks[0]) + } + return ret +} + +// generateDeliverSet would: +// - generate preceding set +// - check if the preceding set deliverable by checking potential function +func (to *totalOrdering) generateDeliverSet() ( + delivered map[common.Hash]struct{}, mode uint32) { + var ( + chainID, otherChainID uint32 + info, otherInfo *totalOrderingCandidateInfo + precedings = make(map[uint32]struct{}) + cfg = to.configs[to.curRound-to.configs[0].roundID] + ) + mode = TotalOrderingModeNormal + to.globalVector.updateCandidateInfo(to.dirtyChainIDs, to.objCache) + globalInfo := to.globalVector.cachedCandidateInfo + for _, chainID = range to.candidateChainIDs { + to.candidates[chainID].updateAckingHeightVector( + globalInfo, cfg.k, to.dirtyChainIDs, to.objCache) + } + // Update winning records for each candidate. + // TODO(mission): It's not reasonable to + // request one routine for each candidate, the context + // switch rate would be high. + var wg sync.WaitGroup + wg.Add(len(to.candidateChainIDs)) + for _, chainID := range to.candidateChainIDs { + info = to.candidates[chainID] + go func(can uint32, canInfo *totalOrderingCandidateInfo) { + for _, otherChainID := range to.candidateChainIDs { + if can == otherChainID { + continue + } + canInfo.updateWinRecord( + otherChainID, + to.candidates[otherChainID], + to.dirtyChainIDs, + to.objCache, + cfg.numChains) + } + wg.Done() + }(chainID, info) + } + wg.Wait() + // Reset dirty chains. + to.dirtyChainIDs = to.dirtyChainIDs[:0] + // TODO(mission): ANS should be bound by current numChains. + globalAnsLength := globalInfo.getAckingNodeSetLength( + globalInfo, cfg.k, cfg.numChains) +CheckNextCandidateLoop: + for _, chainID = range to.candidateChainIDs { + info = to.candidates[chainID] + for _, otherChainID = range to.candidateChainIDs { + if chainID == otherChainID { + continue + } + otherInfo = to.candidates[otherChainID] + // TODO(mission): grade should be bound by current numChains. + if otherInfo.winRecords[chainID].grade( + cfg.numChains, cfg.phi, globalAnsLength) != 0 { + continue CheckNextCandidateLoop + } + } + precedings[chainID] = struct{}{} + } + if len(precedings) == 0 { + return + } + // internal is a helper function to verify internal stability. + internal := func() bool { + var ( + isPreceding, beaten bool + p uint32 + ) + for _, chainID = range to.candidateChainIDs { + if _, isPreceding = precedings[chainID]; isPreceding { + continue + } + beaten = false + for p = range precedings { + // TODO(mission): grade should be bound by current numChains. + if beaten = to.candidates[p].winRecords[chainID].grade( + cfg.numChains, cfg.phi, globalAnsLength) == 1; beaten { + break + } + } + if !beaten { + return false + } + } + return true + } + // checkAHV is a helper function to verify external stability. + // It would make sure some preceding block is strong enough + // to lead the whole preceding set. 
+ checkAHV := func() bool { + var ( + height, count uint64 + p uint32 + ) + for p = range precedings { + count = 0 + info = to.candidates[p] + for _, height = range info.cachedHeightVector { + if height != infinity { + count++ + if count > cfg.phi { + return true + } + } + } + } + return false + } + // checkANS is a helper function to verify external stability. + // It would make sure all preceding blocks are strong enough + // to be delivered. + checkANS := func() bool { + var chainAnsLength uint64 + for p := range precedings { + // TODO(mission): ANS should be bound by current numChains. + chainAnsLength = to.candidates[p].getAckingNodeSetLength( + globalInfo, cfg.k, cfg.numChains) + if uint64(chainAnsLength) < uint64(cfg.numChains)-cfg.phi { + return false + } + } + return true + } + // If all chains propose enough blocks, we should force + // to deliver since the whole picture of the DAG is revealed. + if globalAnsLength != uint64(cfg.numChains) { + // Check internal stability first. + if !internal() { + return + } + + // The whole picture is not ready, we need to check if + // exteranl stability is met, and we can deliver earlier. + if checkAHV() && checkANS() { + mode = TotalOrderingModeEarly + } else { + return + } + } + delivered = make(map[common.Hash]struct{}) + for p := range precedings { + delivered[to.candidates[p].hash] = struct{}{} + } + return +} + +// flushBlocks flushes blocks. +func (to *totalOrdering) flushBlocks( + b *types.Block) (flushed []*types.Block, mode uint32, err error) { + cfg := to.configs[to.curRound-to.configs[0].roundID] + mode = TotalOrderingModeFlush + if cfg.isValidLastBlock(b) { + to.flushReadyChains[b.Position.ChainID] = struct{}{} + } + // Flush blocks until last blocks from all chains are arrived. + if len(to.flushReadyChains) < int(cfg.numChains) { + return + } + if len(to.flushReadyChains) > int(cfg.numChains) { + // This line should never be reached. + err = ErrFutureRoundDelivered + return + } + // Dump all blocks in this round. + for { + if len(to.flushed) == int(cfg.numChains) { + break + } + // Dump all candidates without checking potential function. + flushedHashes := make(map[common.Hash]struct{}) + for _, chainID := range to.candidateChainIDs { + candidateBlock := to.pendings[to.candidates[chainID].hash] + if candidateBlock.Position.Round > to.curRound { + continue + } + flushedHashes[candidateBlock.Hash] = struct{}{} + } + if len(flushedHashes) == 0 { + err = ErrTotalOrderingHangs + return + } + flushedBlocks := to.output(flushedHashes, cfg.numChains) + for _, b := range flushedBlocks { + if !cfg.isValidLastBlock(b) { + continue + } + to.flushed[b.Position.ChainID] = struct{}{} + } + flushed = append(flushed, flushedBlocks...) + } + // Switch back to normal mode: delivered by DEXON total ordering algorithm. + to.duringFlush = false + to.flushed = make(map[uint32]struct{}) + to.flushReadyChains = make(map[uint32]struct{}) + // Clean all cached intermediate stats. + for idx := range to.candidates { + if to.candidates[idx] == nil { + continue + } + to.candidates[idx].recycle(to.objCache) + to.candidates[idx] = nil + } + to.dirtyChainIDs = nil + to.candidateChainMapping = make(map[uint32]common.Hash) + to.candidateChainIDs = nil + to.globalVector.cachedCandidateInfo = nil + to.switchRound() + // Force to pick new candidates. + numChains := to.configs[to.curRound-to.configs[0].roundID].numChains + to.output(map[common.Hash]struct{}{}, numChains) + return +} + +// deliverBlocks delivers blocks by DEXON total ordering algorithm. 
+func (to *totalOrdering) deliverBlocks() ( + delivered []*types.Block, mode uint32, err error) { + hashes, mode := to.generateDeliverSet() + cfg := to.configs[to.curRound-to.configs[0].roundID] + // output precedings + delivered = to.output(hashes, cfg.numChains) + // Check if any block in delivered set are the last block in this round + // of that chain. If yes, flush or round-switching would be performed. + for _, b := range delivered { + if b.Position.Round > to.curRound { + err = ErrFutureRoundDelivered + return + } + if !cfg.isValidLastBlock(b) { + continue + } + if cfg.isFlushRequired { + // Switch to flush mode. + to.duringFlush = true + to.flushReadyChains = make(map[uint32]struct{}) + to.flushed = make(map[uint32]struct{}) + } else { + // Switch round directly. + to.switchRound() + } + break + } + if to.duringFlush { + // Make sure last blocks from all chains are marked as 'flushed'. + for _, b := range delivered { + if !cfg.isValidLastBlock(b) { + continue + } + to.flushReadyChains[b.Position.ChainID] = struct{}{} + to.flushed[b.Position.ChainID] = struct{}{} + } + } + return +} + +// processBlock is the entry point of totalOrdering. +func (to *totalOrdering) processBlock( + b *types.Block) ([]*types.Block, uint32, error) { + // NOTE: I assume the block 'b' is already safe for total ordering. + // That means, all its acking blocks are during/after + // total ordering stage. + cfg := to.configs[to.curRound-to.configs[0].roundID] + to.pendings[b.Hash] = b + to.buildBlockRelation(b) + pos, err := to.updateVectors(b) + if err != nil { + return nil, uint32(0), err + } + // Mark the proposer of incoming block as dirty. + if b.Position.ChainID < cfg.numChains { + to.dirtyChainIDs = append(to.dirtyChainIDs, int(b.Position.ChainID)) + _, picked := to.candidateChainMapping[b.Position.ChainID] + if pos == 0 && !picked { + if to.isAckOnlyPrecedings(b) { + to.prepareCandidate(b) + } + } + } + if to.duringFlush { + return to.flushBlocks(b) + } + return to.deliverBlocks() +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block-randomness.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block-randomness.go new file mode 100644 index 000000000..ebffd2b22 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block-randomness.go @@ -0,0 +1,43 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package types + +import ( + "fmt" + + "github.com/dexon-foundation/dexon-consensus-core/common" +) + +// AgreementResult describes an agremeent result. 
+type AgreementResult struct { + BlockHash common.Hash `json:"block_hash"` + Position Position `json:"position"` + Votes []Vote `json:"votes"` +} + +func (r *AgreementResult) String() string { + return fmt.Sprintf( + "agreementResult[%s:%s]", r.BlockHash, &r.Position) +} + +// BlockRandomnessResult describes a block randomness result +type BlockRandomnessResult struct { + BlockHash common.Hash `json:"block_hash"` + Position Position `json:"position"` + Randomness []byte `json:"randomness"` +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block.go new file mode 100644 index 000000000..9de467397 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block.go @@ -0,0 +1,347 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +// TODO(jimmy-dexon): remove comments of WitnessAck before open source. + +package types + +import ( + "bytes" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/dexon-foundation/dexon/rlp" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" +) + +var ( + // blockPool is the blocks cache to reuse allocated blocks. + blockPool = sync.Pool{ + New: func() interface{} { + return &Block{} + }, + } +) + +type rlpTimestamp struct { + time.Time +} + +func (t *rlpTimestamp) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, uint64(t.UTC().UnixNano())) +} + +func (t *rlpTimestamp) DecodeRLP(s *rlp.Stream) error { + var nano uint64 + err := s.Decode(&nano) + if err == nil { + sec := int64(nano) / 1000000000 + nsec := int64(nano) % 1000000000 + t.Time = time.Unix(sec, nsec).UTC() + } + return err +} + +// FinalizationResult represents the result of DEXON consensus algorithm. +type FinalizationResult struct { + Randomness []byte `json:"randomness"` + Timestamp time.Time `json:"timestamp"` + Height uint64 `json:"height"` +} + +type rlpFinalizationResult struct { + Randomness []byte + Timestamp *rlpTimestamp + Height uint64 +} + +// EncodeRLP implements rlp.Encoder +func (f *FinalizationResult) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, &rlpFinalizationResult{ + Randomness: f.Randomness, + Timestamp: &rlpTimestamp{f.Timestamp}, + Height: f.Height, + }) +} + +// DecodeRLP implements rlp.Decoder +func (f *FinalizationResult) DecodeRLP(s *rlp.Stream) error { + var dec rlpFinalizationResult + err := s.Decode(&dec) + if err == nil { + *f = FinalizationResult{ + Randomness: dec.Randomness, + Timestamp: dec.Timestamp.Time, + Height: dec.Height, + } + } + return err +} + +// Witness represents the consensus information on the compaction chain. 
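The EncodeRLP/DecodeRLP pair above makes FinalizationResult round-trippable with the dexon rlp package; timestamps survive because rlpTimestamp stores UnixNano as a uint64. A small hypothetical round-trip check (the field values are arbitrary):

package main

import (
	"fmt"
	"time"

	"github.com/dexon-foundation/dexon-consensus-core/core/types"
	"github.com/dexon-foundation/dexon/rlp"
)

func main() {
	orig := types.FinalizationResult{
		Randomness: []byte{0x01, 0x02},
		Timestamp:  time.Now().UTC(),
		Height:     42,
	}
	raw, err := rlp.EncodeToBytes(&orig)
	if err != nil {
		panic(err)
	}
	var decoded types.FinalizationResult
	if err := rlp.DecodeBytes(raw, &decoded); err != nil {
		panic(err)
	}
	// Prints "42 true": the height and the nanosecond-precision timestamp survive.
	fmt.Println(decoded.Height, decoded.Timestamp.Equal(orig.Timestamp))
}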
+type Witness struct { + Timestamp time.Time `json:"timestamp"` + Height uint64 `json:"height"` + Data []byte `json:"data"` +} + +type rlpWitness struct { + Timestamp *rlpTimestamp + Height uint64 + Data []byte +} + +// EncodeRLP implements rlp.Encoder +func (w *Witness) EncodeRLP(writer io.Writer) error { + return rlp.Encode(writer, rlpWitness{ + Timestamp: &rlpTimestamp{w.Timestamp}, + Height: w.Height, + Data: w.Data, + }) +} + +// DecodeRLP implements rlp.Decoder +func (w *Witness) DecodeRLP(s *rlp.Stream) error { + var dec rlpWitness + err := s.Decode(&dec) + if err == nil { + *w = Witness{ + Timestamp: dec.Timestamp.Time, + Height: dec.Height, + Data: dec.Data, + } + } + return err +} + +// RecycleBlock put unused block into cache, which might be reused if +// not garbage collected. +func RecycleBlock(b *Block) { + blockPool.Put(b) +} + +// NewBlock initiate a block. +func NewBlock() (b *Block) { + b = blockPool.Get().(*Block) + b.Acks = b.Acks[:0] + return +} + +// Block represents a single event broadcasted on the network. +type Block struct { + ProposerID NodeID `json:"proposer_id"` + ParentHash common.Hash `json:"parent_hash"` + Hash common.Hash `json:"hash"` + Position Position `json:"position"` + Timestamp time.Time `json:"timestamp"` + Acks common.SortedHashes `json:"acks"` + Payload []byte `json:"payload"` + Witness Witness `json:"witness"` + Finalization FinalizationResult `json:"finalization"` + Signature crypto.Signature `json:"signature"` + + CRSSignature crypto.Signature `json:"crs_signature"` +} + +type rlpBlock struct { + ProposerID NodeID + ParentHash common.Hash + Hash common.Hash + Position Position + Timestamp *rlpTimestamp + Acks common.SortedHashes + Payload []byte + Witness *Witness + Finalization *FinalizationResult + Signature crypto.Signature + + CRSSignature crypto.Signature +} + +// EncodeRLP implements rlp.Encoder +func (b *Block) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, rlpBlock{ + ProposerID: b.ProposerID, + ParentHash: b.ParentHash, + Hash: b.Hash, + Position: b.Position, + Timestamp: &rlpTimestamp{b.Timestamp}, + Acks: b.Acks, + Payload: b.Payload, + Witness: &b.Witness, + Finalization: &b.Finalization, + Signature: b.Signature, + CRSSignature: b.CRSSignature, + }) +} + +// DecodeRLP implements rlp.Decoder +func (b *Block) DecodeRLP(s *rlp.Stream) error { + var dec rlpBlock + err := s.Decode(&dec) + if err == nil { + *b = Block{ + ProposerID: dec.ProposerID, + ParentHash: dec.ParentHash, + Hash: dec.Hash, + Position: dec.Position, + Timestamp: dec.Timestamp.Time, + Acks: dec.Acks, + Payload: dec.Payload, + Witness: *dec.Witness, + Finalization: *dec.Finalization, + Signature: dec.Signature, + CRSSignature: dec.CRSSignature, + } + } + return err +} + +func (b *Block) String() string { + return fmt.Sprintf("Block(%v:%d:%d)", b.Hash.String()[:6], + b.Position.ChainID, b.Position.Height) +} + +// Clone returns a deep copy of a block. 
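+//
+// A minimal usage sketch: the deep-copied fields (Acks, Payload, Witness.Data,
+// Finalization.Randomness) share no backing arrays with the original, and the
+// copy can be handed back to the block pool once it is no longer needed:
+//
+//	c := b.Clone()
+//	c.Payload = append(c.Payload, 0x00) // leaves b.Payload untouched
+//	// ... use c independently of b ...
+//	RecycleBlock(c)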
+func (b *Block) Clone() (bcopy *Block) { + bcopy = NewBlock() + bcopy.ProposerID = b.ProposerID + bcopy.ParentHash = b.ParentHash + bcopy.Hash = b.Hash + bcopy.Position.ChainID = b.Position.ChainID + bcopy.Position.Height = b.Position.Height + bcopy.Signature = b.Signature.Clone() + bcopy.CRSSignature = b.CRSSignature.Clone() + bcopy.Finalization.Timestamp = b.Finalization.Timestamp + bcopy.Finalization.Height = b.Finalization.Height + bcopy.Witness.Timestamp = b.Witness.Timestamp + bcopy.Witness.Height = b.Witness.Height + bcopy.Witness.Data = make([]byte, len(b.Witness.Data)) + copy(bcopy.Witness.Data, b.Witness.Data) + bcopy.Timestamp = b.Timestamp + bcopy.Acks = make(common.SortedHashes, len(b.Acks)) + copy(bcopy.Acks, b.Acks) + bcopy.Payload = make([]byte, len(b.Payload)) + copy(bcopy.Payload, b.Payload) + bcopy.Finalization.Randomness = make([]byte, len(b.Finalization.Randomness)) + copy(bcopy.Finalization.Randomness, b.Finalization.Randomness) + return +} + +// IsGenesis checks if the block is a genesisBlock +func (b *Block) IsGenesis() bool { + return b.Position.Height == 0 && b.ParentHash == common.Hash{} +} + +// IsFinalized checks if the finalization data is ready. +func (b *Block) IsFinalized() bool { + return b.Finalization.Height != 0 +} + +// IsEmpty checks if the block is an 'empty block'. +func (b *Block) IsEmpty() bool { + return b.ProposerID.Hash == common.Hash{} +} + +// IsAcking checks if a block acking another by it's hash. +func (b *Block) IsAcking(hash common.Hash) bool { + idx := sort.Search(len(b.Acks), func(i int) bool { + return bytes.Compare(b.Acks[i][:], hash[:]) >= 0 + }) + return !(idx == len(b.Acks) || b.Acks[idx] != hash) +} + +// ByHash is the helper type for sorting slice of blocks by hash. +type ByHash []*Block + +func (b ByHash) Len() int { + return len(b) +} + +func (b ByHash) Less(i int, j int) bool { + return bytes.Compare([]byte(b[i].Hash[:]), []byte(b[j].Hash[:])) == -1 +} + +func (b ByHash) Swap(i int, j int) { + b[i], b[j] = b[j], b[i] +} + +// ByPosition is the helper type for sorting slice of blocks by position. +type ByPosition []*Block + +// Len implements Len method in sort.Sort interface. +func (bs ByPosition) Len() int { + return len(bs) +} + +// Less implements Less method in sort.Sort interface. +func (bs ByPosition) Less(i int, j int) bool { + return bs[j].Position.Newer(&bs[i].Position) +} + +// Swap implements Swap method in sort.Sort interface. +func (bs ByPosition) Swap(i int, j int) { + bs[i], bs[j] = bs[j], bs[i] +} + +// Push implements Push method in heap interface. +func (bs *ByPosition) Push(x interface{}) { + *bs = append(*bs, x.(*Block)) +} + +// Pop implements Pop method in heap interface. +func (bs *ByPosition) Pop() (ret interface{}) { + n := len(*bs) + *bs, ret = (*bs)[0:n-1], (*bs)[n-1] + return +} + +// ByFinalizationHeight is the helper type for sorting slice of blocks by +// finalization height. +type ByFinalizationHeight []*Block + +// Len implements Len method in sort.Sort interface. +func (bs ByFinalizationHeight) Len() int { + return len(bs) +} + +// Less implements Less method in sort.Sort interface. +func (bs ByFinalizationHeight) Less(i int, j int) bool { + return bs[i].Finalization.Height < bs[j].Finalization.Height +} + +// Swap implements Swap method in sort.Sort interface. +func (bs ByFinalizationHeight) Swap(i int, j int) { + bs[i], bs[j] = bs[j], bs[i] +} + +// Push implements Push method in heap interface. 
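+//
+// ByFinalizationHeight implements heap.Interface (Len/Less/Swap above,
+// Push/Pop below), so callers can keep a min-heap ordered by finalization
+// height. A caller-side sketch, assuming the caller imports "container/heap"
+// and blockA/blockB are previously built *types.Block values:
+//
+//	h := &types.ByFinalizationHeight{}
+//	heap.Init(h)
+//	heap.Push(h, blockA)
+//	heap.Push(h, blockB)
+//	lowest := heap.Pop(h).(*types.Block) // smallest Finalization.Height first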
+func (bs *ByFinalizationHeight) Push(x interface{}) { + *bs = append(*bs, x.(*Block)) +} + +// Pop implements Pop method in heap interface. +func (bs *ByFinalizationHeight) Pop() (ret interface{}) { + n := len(*bs) + *bs, ret = (*bs)[0:n-1], (*bs)[n-1] + return +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/config.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/config.go new file mode 100644 index 000000000..372ffb4da --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/config.go @@ -0,0 +1,93 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package types + +import ( + "encoding/binary" + "math" + "time" +) + +// Config stands for Current Configuration Parameters. +type Config struct { + // Network related. + NumChains uint32 + + // Lambda related. + LambdaBA time.Duration + LambdaDKG time.Duration + + // Total ordering related. + K int + PhiRatio float32 + + // Set related. + NotarySetSize uint32 + DKGSetSize uint32 + + // Time related. + RoundInterval time.Duration + MinBlockInterval time.Duration + MaxBlockInterval time.Duration +} + +// Bytes returns []byte representation of Config. +func (c *Config) Bytes() []byte { + binaryNumChains := make([]byte, 4) + binary.LittleEndian.PutUint32(binaryNumChains, c.NumChains) + + binaryLambdaBA := make([]byte, 8) + binary.LittleEndian.PutUint64( + binaryLambdaBA, uint64(c.LambdaBA.Nanoseconds())) + binaryLambdaDKG := make([]byte, 8) + binary.LittleEndian.PutUint64( + binaryLambdaDKG, uint64(c.LambdaDKG.Nanoseconds())) + + binaryK := make([]byte, 4) + binary.LittleEndian.PutUint32(binaryK, uint32(c.K)) + binaryPhiRatio := make([]byte, 4) + binary.LittleEndian.PutUint32(binaryPhiRatio, math.Float32bits(c.PhiRatio)) + + binaryNotarySetSize := make([]byte, 4) + binary.LittleEndian.PutUint32(binaryNotarySetSize, c.NotarySetSize) + binaryDKGSetSize := make([]byte, 4) + binary.LittleEndian.PutUint32(binaryDKGSetSize, c.DKGSetSize) + + binaryRoundInterval := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRoundInterval, + uint64(c.RoundInterval.Nanoseconds())) + binaryMinBlockInterval := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryMinBlockInterval, + uint64(c.MinBlockInterval.Nanoseconds())) + binaryMaxBlockInterval := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryMaxBlockInterval, + uint64(c.MaxBlockInterval.Nanoseconds())) + + enc := make([]byte, 0, 40) + enc = append(enc, binaryNumChains...) + enc = append(enc, binaryLambdaBA...) + enc = append(enc, binaryLambdaDKG...) + enc = append(enc, binaryK...) + enc = append(enc, binaryPhiRatio...) + enc = append(enc, binaryNotarySetSize...) + enc = append(enc, binaryDKGSetSize...) 
+	enc = append(enc, binaryRoundInterval...)
+	enc = append(enc, binaryMinBlockInterval...)
+	enc = append(enc, binaryMaxBlockInterval...)
+	return enc
+}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/dkg.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/dkg.go
new file mode 100644
index 000000000..edd420df9
--- /dev/null
+++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/dkg.go
@@ -0,0 +1,193 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/dexon-foundation/dexon/rlp"
+
+	"github.com/dexon-foundation/dexon-consensus-core/common"
+	"github.com/dexon-foundation/dexon-consensus-core/core/crypto"
+	"github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg"
+)
+
+// DKGPrivateShare describes a secret share in the DKG protocol.
+type DKGPrivateShare struct {
+	ProposerID   NodeID           `json:"proposer_id"`
+	ReceiverID   NodeID           `json:"receiver_id"`
+	Round        uint64           `json:"round"`
+	PrivateShare dkg.PrivateKey   `json:"private_share"`
+	Signature    crypto.Signature `json:"signature"`
+}
+
+// Equal checks equality between two DKGPrivateShare instances.
+func (p *DKGPrivateShare) Equal(other *DKGPrivateShare) bool {
+	return p.ProposerID.Equal(other.ProposerID) &&
+		p.ReceiverID.Equal(other.ReceiverID) &&
+		p.Round == other.Round &&
+		p.Signature.Type == other.Signature.Type &&
+		bytes.Compare(p.Signature.Signature, other.Signature.Signature) == 0 &&
+		bytes.Compare(
+			p.PrivateShare.Bytes(), other.PrivateShare.Bytes()) == 0
+}
+
+// DKGMasterPublicKey describes a master public key in the DKG protocol.
+type DKGMasterPublicKey struct {
+	ProposerID      NodeID              `json:"proposer_id"`
+	Round           uint64              `json:"round"`
+	DKGID           dkg.ID              `json:"dkg_id"`
+	PublicKeyShares dkg.PublicKeyShares `json:"public_key_shares"`
+	Signature       crypto.Signature    `json:"signature"`
+}
+
+func (d *DKGMasterPublicKey) String() string {
+	return fmt.Sprintf("MasterPublicKey[%s:%d]",
+		d.ProposerID.String()[:6],
+		d.Round)
+}
+
+// Equal checks equality of two DKG master public keys.
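+//
+// Equal is what a round-trip through the JSON (or RLP) codecs below is
+// expected to preserve. A sketch, with error handling elided and mpk being
+// a previously received *DKGMasterPublicKey:
+//
+//	buf, _ := json.Marshal(mpk)
+//	decoded := NewDKGMasterPublicKey()
+//	_ = json.Unmarshal(buf, decoded)
+//	same := mpk.Equal(decoded) // expected to be true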
+func (d *DKGMasterPublicKey) Equal(other *DKGMasterPublicKey) bool {
+	return d.ProposerID.Equal(other.ProposerID) &&
+		d.Round == other.Round &&
+		d.DKGID.GetHexString() == other.DKGID.GetHexString() &&
+		d.PublicKeyShares.Equal(&other.PublicKeyShares) &&
+		d.Signature.Type == other.Signature.Type &&
+		bytes.Compare(d.Signature.Signature, other.Signature.Signature) == 0
+}
+
+type rlpDKGMasterPublicKey struct {
+	ProposerID      NodeID
+	Round           uint64
+	DKGID           []byte
+	PublicKeyShares *dkg.PublicKeyShares
+	Signature       crypto.Signature
+}
+
+// EncodeRLP implements rlp.Encoder
+func (d *DKGMasterPublicKey) EncodeRLP(w io.Writer) error {
+	return rlp.Encode(w, rlpDKGMasterPublicKey{
+		ProposerID:      d.ProposerID,
+		Round:           d.Round,
+		DKGID:           d.DKGID.GetLittleEndian(),
+		PublicKeyShares: &d.PublicKeyShares,
+		Signature:       d.Signature,
+	})
+}
+
+// DecodeRLP implements rlp.Decoder
+func (d *DKGMasterPublicKey) DecodeRLP(s *rlp.Stream) error {
+	var dec rlpDKGMasterPublicKey
+	if err := s.Decode(&dec); err != nil {
+		return err
+	}
+
+	id, err := dkg.BytesID(dec.DKGID)
+	if err != nil {
+		return err
+	}
+
+	*d = DKGMasterPublicKey{
+		ProposerID:      dec.ProposerID,
+		Round:           dec.Round,
+		DKGID:           id,
+		PublicKeyShares: *dec.PublicKeyShares,
+		Signature:       dec.Signature,
+	}
+	return err
+}
+
+// NewDKGMasterPublicKey returns a new DKGMasterPublicKey instance.
+func NewDKGMasterPublicKey() *DKGMasterPublicKey {
+	return &DKGMasterPublicKey{
+		PublicKeyShares: *dkg.NewEmptyPublicKeyShares(),
+	}
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (d *DKGMasterPublicKey) UnmarshalJSON(data []byte) error {
+	type innertDKGMasterPublicKey DKGMasterPublicKey
+	d.PublicKeyShares = *dkg.NewEmptyPublicKeyShares()
+	return json.Unmarshal(data, (*innertDKGMasterPublicKey)(d))
+}
+
+// DKGComplaint describes a complaint in the DKG protocol.
+type DKGComplaint struct {
+	ProposerID   NodeID           `json:"proposer_id"`
+	Round        uint64           `json:"round"`
+	PrivateShare DKGPrivateShare  `json:"private_share"`
+	Signature    crypto.Signature `json:"signature"`
+}
+
+func (c *DKGComplaint) String() string {
+	if c.IsNack() {
+		return fmt.Sprintf("DKGNackComplaint[%s:%d]%s",
+			c.ProposerID.String()[:6], c.Round,
+			c.PrivateShare.ProposerID.String()[:6])
+	}
+	return fmt.Sprintf("DKGComplaint[%s:%d]%v",
+		c.ProposerID.String()[:6], c.Round, c.PrivateShare)
+}
+
+// Equal checks equality between two DKGComplaint instances.
+func (c *DKGComplaint) Equal(other *DKGComplaint) bool {
+	return c.ProposerID.Equal(other.ProposerID) &&
+		c.Round == other.Round &&
+		c.PrivateShare.Equal(&other.PrivateShare) &&
+		c.Signature.Type == other.Signature.Type &&
+		bytes.Compare(c.Signature.Signature, other.Signature.Signature) == 0
+}
+
+// DKGPartialSignature describes a partial signature in the DKG protocol.
+type DKGPartialSignature struct {
+	ProposerID       NodeID               `json:"proposer_id"`
+	Round            uint64               `json:"round"`
+	Hash             common.Hash          `json:"hash"`
+	PartialSignature dkg.PartialSignature `json:"partial_signature"`
+	Signature        crypto.Signature     `json:"signature"`
+}
+
+// DKGFinalize describes a finalize message in the DKG protocol.
+type DKGFinalize struct {
+	ProposerID NodeID           `json:"proposer_id"`
+	Round      uint64           `json:"round"`
+	Signature  crypto.Signature `json:"signature"`
+}
+
+func (final *DKGFinalize) String() string {
+	return fmt.Sprintf("DKGFinal[%s:%d]",
+		final.ProposerID.String()[:6],
+		final.Round)
+}
+
+// Equal checks equality of two DKGFinalize instances.
+func (final *DKGFinalize) Equal(other *DKGFinalize) bool { + return final.ProposerID.Equal(other.ProposerID) && + final.Round == other.Round && + final.Signature.Type == other.Signature.Type && + bytes.Compare(final.Signature.Signature, other.Signature.Signature) == 0 +} + +// IsNack returns true if it's a nack complaint in DKG protocol. +func (c *DKGComplaint) IsNack() bool { + return len(c.PrivateShare.Signature.Signature) == 0 +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/node.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/node.go new file mode 100644 index 000000000..839c2bf3b --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/node.go @@ -0,0 +1,56 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package types + +import ( + "bytes" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" +) + +// NodeID is the ID type for nodes. +type NodeID struct { + common.Hash +} + +// NewNodeID returns a NodeID with Hash set to the hash value of +// public key. +func NewNodeID(pubKey crypto.PublicKey) NodeID { + return NodeID{Hash: crypto.Keccak256Hash(pubKey.Bytes()[1:])} +} + +// Equal checks if the hash representation is the same NodeID. +func (v NodeID) Equal(v2 NodeID) bool { + return v.Hash == v2.Hash +} + +// NodeIDs implements sort.Interface for NodeID. +type NodeIDs []NodeID + +func (v NodeIDs) Len() int { + return len(v) +} + +func (v NodeIDs) Less(i int, j int) bool { + return bytes.Compare([]byte(v[i].Hash[:]), []byte(v[j].Hash[:])) == -1 +} + +func (v NodeIDs) Swap(i int, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/nodeset.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/nodeset.go new file mode 100644 index 000000000..eb83f19ab --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/nodeset.go @@ -0,0 +1,145 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package types + +import ( + "container/heap" + "encoding/binary" + "math/big" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" +) + +// NodeSet is the node set structure as defined in DEXON consensus core. +type NodeSet struct { + IDs map[NodeID]struct{} +} + +// SubSetTarget is the sub set target for GetSubSet(). +type SubSetTarget *big.Int + +type subSetTargetType byte + +const ( + targetNotarySet subSetTargetType = iota + targetDKGSet +) + +type nodeRank struct { + ID NodeID + rank *big.Int +} + +// rankHeap is a MaxHeap structure. +type rankHeap []*nodeRank + +func (h rankHeap) Len() int { return len(h) } +func (h rankHeap) Less(i, j int) bool { return h[i].rank.Cmp(h[j].rank) > 0 } +func (h rankHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h *rankHeap) Push(x interface{}) { + *h = append(*h, x.(*nodeRank)) +} +func (h *rankHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +// NewNodeSet creates a new NodeSet instance. +func NewNodeSet() *NodeSet { + return &NodeSet{ + IDs: make(map[NodeID]struct{}), + } +} + +// NewNotarySetTarget is the target for getting Notary Set. +func NewNotarySetTarget(crs common.Hash, chainID uint32) SubSetTarget { + binaryChainID := make([]byte, 4) + binary.LittleEndian.PutUint32(binaryChainID, chainID) + + return newTarget(targetNotarySet, crs[:], binaryChainID) +} + +// NewDKGSetTarget is the target for getting DKG Set. +func NewDKGSetTarget(crs common.Hash) SubSetTarget { + return newTarget(targetDKGSet, crs[:]) +} + +// Add a NodeID to the set. +func (ns *NodeSet) Add(ID NodeID) { + ns.IDs[ID] = struct{}{} +} + +// Clone the NodeSet. +func (ns *NodeSet) Clone() *NodeSet { + nsCopy := NewNodeSet() + for ID := range ns.IDs { + nsCopy.Add(ID) + } + return nsCopy +} + +// GetSubSet returns the subset of given target. +func (ns *NodeSet) GetSubSet( + size int, target SubSetTarget) map[NodeID]struct{} { + h := rankHeap{} + idx := 0 + for nID := range ns.IDs { + if idx < size { + h = append(h, newNodeRank(nID, target)) + } else if idx == size { + heap.Init(&h) + } + if idx >= size { + rank := newNodeRank(nID, target) + if rank.rank.Cmp(h[0].rank) < 0 { + h[0] = rank + heap.Fix(&h, 0) + } + } + idx++ + } + + nIDs := make(map[NodeID]struct{}, size) + for _, rank := range h { + nIDs[rank.ID] = struct{}{} + } + + return nIDs +} + +func newTarget(targetType subSetTargetType, data ...[]byte) SubSetTarget { + data = append(data, []byte{byte(targetType)}) + h := crypto.Keccak256Hash(data...) + num := big.NewInt(0) + num.SetBytes(h[:]) + return SubSetTarget(num) +} + +func newNodeRank(ID NodeID, target SubSetTarget) *nodeRank { + num := big.NewInt(0) + num.SetBytes(ID.Hash[:]) + num.Abs(num.Sub((*big.Int)(target), num)) + return &nodeRank{ + ID: ID, + rank: num, + } +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/position.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/position.go new file mode 100644 index 000000000..51de405c3 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/position.go @@ -0,0 +1,67 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. 
+// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package types + +import ( + "errors" + "fmt" +) + +// ErrComparePositionOnDifferentChains raised when attempting to +// compare two positions with different chain ID. +var ErrComparePositionOnDifferentChains = errors.New( + "position on different chain") + +// Position describes the position in the block lattice of an entity. +type Position struct { + ChainID uint32 `json:"chain_id"` + Round uint64 `json:"round"` + Height uint64 `json:"height"` +} + +func (pos *Position) String() string { + return fmt.Sprintf("pos[%d:%d:%d]", pos.ChainID, pos.Round, pos.Height) +} + +// Equal checks if two positions are equal, it panics when their chainIDs +// are different. +func (pos *Position) Equal(other *Position) bool { + if pos.ChainID != other.ChainID { + panic(ErrComparePositionOnDifferentChains) + } + return pos.Round == other.Round && pos.Height == other.Height +} + +// Newer checks if one block is newer than another one on the same chain. +// If two blocks on different chain compared by this function, it would panic. +func (pos *Position) Newer(other *Position) bool { + if pos.ChainID != other.ChainID { + panic(ErrComparePositionOnDifferentChains) + } + return pos.Round > other.Round || + (pos.Round == other.Round && pos.Height > other.Height) +} + +// Clone a position instance. +func (pos *Position) Clone() *Position { + return &Position{ + ChainID: pos.ChainID, + Round: pos.Round, + Height: pos.Height, + } +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/vote.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/vote.go new file mode 100644 index 000000000..bbf2f266b --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/vote.go @@ -0,0 +1,65 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. 
+ +package types + +import ( + "fmt" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" +) + +// VoteType is the type of vote. +type VoteType byte + +// VoteType enum. +const ( + VoteInit VoteType = iota + VotePreCom + VoteCom + // Do not add any type below MaxVoteType. + MaxVoteType +) + +// Vote is the vote structure defined in Crypto Shuffle Algorithm. +type Vote struct { + ProposerID NodeID `json:"proposer_id"` + Type VoteType `json:"type"` + BlockHash common.Hash `json:"block_hash"` + Period uint64 `json:"period"` + Position Position `json:"position"` + Signature crypto.Signature `json:"signature"` +} + +func (v *Vote) String() string { + return fmt.Sprintf("Vote[%s:%d:%d](%d:%d):%s", + v.ProposerID.String()[:6], v.Position.ChainID, v.Position.Height, + v.Period, v.Type, v.BlockHash.String()[:6]) +} + +// Clone returns a deep copy of a vote. +func (v *Vote) Clone() *Vote { + return &Vote{ + ProposerID: v.ProposerID, + Type: v.Type, + BlockHash: v.BlockHash, + Period: v.Period, + Position: v.Position, + Signature: v.Signature.Clone(), + } +} diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/utils.go new file mode 100644 index 000000000..ac9567811 --- /dev/null +++ b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/utils.go @@ -0,0 +1,140 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "errors" + "fmt" + "os" + "sort" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +var ( + debug = false + // ErrEmptyTimestamps would be reported if Block.timestamps is empty. + ErrEmptyTimestamps = errors.New("timestamp vector should not be empty") +) + +func init() { + if os.Getenv("DEBUG") != "" { + debug = true + } +} + +// Debugf is like fmt.Printf, but only output when we are in debug mode. +func Debugf(format string, args ...interface{}) { + if debug { + fmt.Printf(format, args...) + } +} + +// Debugln is like fmt.Println, but only output when we are in debug mode. 
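+//
+// Both Debugf and Debugln are silenced unless the DEBUG environment variable
+// is set when the process starts (see init above). A usage sketch, where the
+// binary name and message are placeholders:
+//
+//	// run with: DEBUG=1 ./consensus-node
+//	Debugf("delivered %d blocks\n", len(delivered))
+//	Debugln("entering flush mode")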
+func Debugln(args ...interface{}) { + if debug { + fmt.Println(args) + } +} + +func interpoTime(t1 time.Time, t2 time.Time, sep int) []time.Time { + if sep == 0 { + return []time.Time{} + } + if t1.After(t2) { + return interpoTime(t2, t1, sep) + } + timestamps := make([]time.Time, sep) + duration := t2.Sub(t1) + period := time.Duration( + (duration.Nanoseconds() / int64(sep+1))) * time.Nanosecond + prevTime := t1 + for idx := range timestamps { + prevTime = prevTime.Add(period) + timestamps[idx] = prevTime + } + return timestamps +} + +func getMedianTime(timestamps []time.Time) (t time.Time, err error) { + if len(timestamps) == 0 { + err = ErrEmptyTimestamps + return + } + tscopy := make([]time.Time, 0, len(timestamps)) + for _, ts := range timestamps { + tscopy = append(tscopy, ts) + } + sort.Sort(common.ByTime(tscopy)) + if len(tscopy)%2 == 0 { + t1 := tscopy[len(tscopy)/2-1] + t2 := tscopy[len(tscopy)/2] + t = interpoTime(t1, t2, 1)[0] + } else { + t = tscopy[len(tscopy)/2] + } + return +} + +func removeFromSortedUint32Slice(xs []uint32, x uint32) []uint32 { + indexToRemove := sort.Search(len(xs), func(idx int) bool { + return xs[idx] >= x + }) + if indexToRemove == len(xs) || xs[indexToRemove] != x { + // This value is not found. + return xs + } + return append(xs[:indexToRemove], xs[indexToRemove+1:]...) +} + +// HashConfigurationBlock returns the hash value of configuration block. +func HashConfigurationBlock( + notarySet map[types.NodeID]struct{}, + config *types.Config, + snapshotHash common.Hash, + prevHash common.Hash, +) common.Hash { + notaryIDs := make(types.NodeIDs, 0, len(notarySet)) + for nID := range notarySet { + notaryIDs = append(notaryIDs, nID) + } + sort.Sort(notaryIDs) + notarySetBytes := make([]byte, 0, len(notarySet)*len(common.Hash{})) + for _, nID := range notaryIDs { + notarySetBytes = append(notarySetBytes, nID.Hash[:]...) + } + configBytes := config.Bytes() + + return crypto.Keccak256Hash( + notarySetBytes[:], + configBytes[:], + snapshotHash[:], + prevHash[:], + ) +} + +// DiffUint64 calculates difference between two uint64. 
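+//
+// For example:
+//
+//	DiffUint64(3, 10) // == 7
+//	DiffUint64(10, 3) // == 7, no unsigned underflow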
+func DiffUint64(a, b uint64) uint64 { + if a > b { + return a - b + } + return b - a +} diff --git a/vendor/github.com/dexon-foundation/mcl/CMakeLists.txt b/vendor/github.com/dexon-foundation/mcl/CMakeLists.txt new file mode 100644 index 000000000..8ac418098 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/CMakeLists.txt @@ -0,0 +1,113 @@ +cmake_minimum_required (VERSION 2.6) +project(mcl CXX ASM) +set(SRCS src/fp.cpp) + +option( + MCL_MAX_BIT_SIZE + "max bit size for Fp" + 0 +) +option( + DOWNLOAD_SOURCE + "download cybozulib_ext" + OFF +) + +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) + +if(MSVC) + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} /MT /W4 /Oy /Ox /EHsc /GS- /Zi /DNDEBUG /DNOMINMAX") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS} /MTd /W4 /DNOMINMAX") + link_directories(${CMAKE_SOURCE_DIR}/../cybozulib_ext/lib) + link_directories(${CMAKE_SOURCE_DIR}/lib) +else() + if("${CFLAGS_OPT_USER}" STREQUAL "") + set(CFLAGS_OPT_USER "-O3 -DNDEBUG -march=native") + endif() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wformat=2 -Wcast-qual -Wcast-align -Wwrite-strings -Wfloat-equal -Wpointer-arith ${CFLAGS_OPT_USER}") + + if(${MCL_MAX_BIT_SIZE} GREATER 0) + add_definitions(-DMCL_MAX_BIT_SIZE=${MCL_MAX_BIT_SIZE}) + endif() + + if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64") + add_definitions(-DMCL_USE_LLVM=1) + set(SRCS ${SRCS} src/asm/aarch64.s) + set(CPU arch64) + elseif(APPLE) + add_definitions(-DMCL_USE_LLVM=1) + set(SRCS ${SRCS} src/asm/x86-64mac.s src/asm/x86-64mac.bmi2.s) + set(CPU x86-64) + elseif(UNIX) + add_definitions(-DMCL_USE_LLVM=1) + set(SRCS ${SRCS} src/asm/x86-64.s src/asm/x86-64.bmi2.s) + set(CPU x86-64) + endif() + set(LIBS mcl gmp gmpxx crypto) +endif() + +if(DOWNLOAD_SOURCE) + if(MSVC) + set(CYBOZULIB_EXT_TAG release20170521) + set(FILES config.h gmp-impl.h gmp-mparam.h gmp.h gmpxx.h longlong.h mpir.h mpirxx.h) + foreach(file IN ITEMS ${FILES}) + file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/include/${file} ${mcl_SOURCE_DIR}/include/cybozulib_ext/${file}) + message("download cybozulib_ext/" ${file}) + endforeach() + set(FILES aes.h applink.c asn1.h asn1_mac.h asn1t.h bio.h blowfish.h bn.h buffer.h camellia.h cast.h cmac.h cms.h comp.h conf.h conf_api.h crypto.h des.h des_old.h dh.h dsa.h dso.h dtls1.h e_os2.h ebcdic.h ec.h ecdh.h ecdsa.h engine.h err.h evp.h hmac.h idea.h krb5_asn.h kssl.h lhash.h md4.h md5.h mdc2.h modes.h obj_mac.h objects.h ocsp.h opensslconf.h opensslv.h ossl_typ.h pem.h pem2.h pkcs12.h pkcs7.h pqueue.h rand.h rc2.h rc4.h ripemd.h rsa.h safestack.h seed.h sha.h srp.h srtp.h ssl.h ssl2.h ssl23.h ssl3.h stack.h symhacks.h tls1.h ts.h txt_db.h ui.h ui_compat.h whrlpool.h x509.h x509_vfy.h x509v3.h) + foreach(file IN ITEMS ${FILES}) + file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/include/openssl/${file} ${mcl_SOURCE_DIR}/include/cybozulib_ext/openssl/${file}) + message("download cybozulib_ext/openssl/" ${file}) + endforeach() + set(FILES mpir.lib mpirxx.lib mpirxx.pdb ssleay32.lib libeay32.lib mpir.pdb) + foreach(file IN ITEMS ${FILES}) + file(DOWNLOAD https://raw.githubusercontent.com/herumi/cybozulib_ext/${CYBOZULIB_EXT_TAG}/lib/mt/14/${file} ${mcl_SOURCE_DIR}/lib/mt/14/${file}) + message("download lib/mt/14/" ${file}) + endforeach() + if(MSVC) + include_directories( + 
${mcl_SOURCE_DIR}/include/cybozulib_ext + ) + endif() + endif() +else() + if(MSVC) + include_directories( + ${mcl_SOURCE_DIR}/../cybozulib_ext/include + ) + endif() +endif() + +include_directories( + ${mcl_SOURCE_DIR}/include +) + +add_library(mcl STATIC ${SRCS}) +if(NOT MSVC) +add_library(mcl_dy SHARED ${SRCS}) +endif() + +file(GLOB MCL_HEADERS include/mcl/*.hpp include/mcl/*.h) +file(GLOB CYBOZULIB_HEADERS include/cybozu/*.hpp) + +install(TARGETS mcl DESTINATION lib) +if(NOT MSVC) +install(TARGETS mcl_dy DESTINATION lib) +endif() +install(FILES ${MCL_HEADERS} DESTINATION include/mcl) +install(FILES ${CYBOZULIB_HEADERS} DESTINATION include/cybozu) + +set(TEST_BASE fp_test ec_test fp_util_test window_method_test elgamal_test fp_tower_test gmp_test bn_test glv_test) +#set(TEST_BASE bn_test) +foreach(base IN ITEMS ${TEST_BASE}) + add_executable( + ${base} + test/${base}.cpp + ) + target_link_libraries( + ${base} + ${LIBS} + ) +endforeach() diff --git a/vendor/github.com/dexon-foundation/mcl/COPYRIGHT b/vendor/github.com/dexon-foundation/mcl/COPYRIGHT new file mode 100644 index 000000000..90e49b4bc --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/COPYRIGHT @@ -0,0 +1,47 @@ + +Copyright (c) 2015 MITSUNARI Shigeo +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. +Neither the name of the copyright owner nor the names of its contributors may +be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. 
+----------------------------------------------------------------------------- +ソースコード形式かバイナリ形式か、変更するかしないかを問わず、以下の条件を満た +す場合に限り、再頒布および使用が許可されます。 + +ソースコードを再頒布する場合、上記の著作権表示、本条件一覧、および下記免責条項 +を含めること。 +バイナリ形式で再頒布する場合、頒布物に付属のドキュメント等の資料に、上記の著作 +権表示、本条件一覧、および下記免責条項を含めること。 +書面による特別の許可なしに、本ソフトウェアから派生した製品の宣伝または販売促進 +に、著作権者の名前またはコントリビューターの名前を使用してはならない。 +本ソフトウェアは、著作権者およびコントリビューターによって「現状のまま」提供さ +れており、明示黙示を問わず、商業的な使用可能性、および特定の目的に対する適合性 +に関する暗黙の保証も含め、またそれに限定されない、いかなる保証もありません。 +著作権者もコントリビューターも、事由のいかんを問わず、 損害発生の原因いかんを +問わず、かつ責任の根拠が契約であるか厳格責任であるか(過失その他の)不法行為で +あるかを問わず、仮にそのような損害が発生する可能性を知らされていたとしても、 +本ソフトウェアの使用によって発生した(代替品または代用サービスの調達、使用の +喪失、データの喪失、利益の喪失、業務の中断も含め、またそれに限定されない)直接 +損害、間接損害、偶発的な損害、特別損害、懲罰的損害、または結果損害について、 +一切責任を負わないものとします。 diff --git a/vendor/github.com/dexon-foundation/mcl/Makefile b/vendor/github.com/dexon-foundation/mcl/Makefile new file mode 100644 index 000000000..fae248aba --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/Makefile @@ -0,0 +1,351 @@ +include common.mk +LIB_DIR=lib +OBJ_DIR=obj +EXE_DIR=bin +SRC_SRC=fp.cpp bn_c256.cpp bn_c384.cpp bn_c512.cpp she_c256.cpp +TEST_SRC=fp_test.cpp ec_test.cpp fp_util_test.cpp window_method_test.cpp elgamal_test.cpp fp_tower_test.cpp gmp_test.cpp bn_test.cpp bn384_test.cpp glv_test.cpp paillier_test.cpp she_test.cpp vint_test.cpp bn512_test.cpp ecdsa_test.cpp conversion_test.cpp +TEST_SRC+=bn_c256_test.cpp bn_c384_test.cpp bn_c384_256_test.cpp bn_c512_test.cpp she_c256_test.cpp she_c384_test.cpp +TEST_SRC+=aggregate_sig_test.cpp array_test.cpp +TEST_SRC+=bls12_test.cpp +TEST_SRC+=ecdsa_c_test.cpp +ifeq ($(CPU),x86-64) + MCL_USE_XBYAK?=1 + TEST_SRC+=mont_fp_test.cpp sq_test.cpp + ifeq ($(USE_LOW_ASM),1) + TEST_SRC+=low_test.cpp + endif + ifeq ($(MCL_USE_XBYAK),1) + TEST_SRC+=fp_generator_test.cpp + endif +endif +SAMPLE_SRC=bench.cpp ecdh.cpp random.cpp rawbench.cpp vote.cpp pairing.cpp large.cpp tri-dh.cpp bls_sig.cpp pairing_c.c she_smpl.cpp + +ifneq ($(MCL_MAX_BIT_SIZE),) + CFLAGS+=-DMCL_MAX_BIT_SIZE=$(MCL_MAX_BIT_SIZE) +endif +ifeq ($(MCL_USE_XBYAK),0) + CFLAGS+=-DMCL_DONT_USE_XBYAK +endif +SHARE_BASENAME_SUF?=_dy +################################################################## +MCL_LIB=$(LIB_DIR)/libmcl.a +MCL_SNAME=mcl$(SHARE_BASENAME_SUF) +BN256_SNAME=mclbn256$(SHARE_BASENAME_SUF) +BN384_SNAME=mclbn384$(SHARE_BASENAME_SUF) +BN384_256_SNAME=mclbn384_256$(SHARE_BASENAME_SUF) +BN512_SNAME=mclbn512$(SHARE_BASENAME_SUF) +SHE256_SNAME=mclshe256$(SHARE_BASENAME_SUF) +MCL_SLIB=$(LIB_DIR)/lib$(MCL_SNAME).$(LIB_SUF) +BN256_LIB=$(LIB_DIR)/libmclbn256.a +BN256_SLIB=$(LIB_DIR)/lib$(BN256_SNAME).$(LIB_SUF) +BN384_LIB=$(LIB_DIR)/libmclbn384.a +BN384_SLIB=$(LIB_DIR)/lib$(BN384_SNAME).$(LIB_SUF) +BN384_256_LIB=$(LIB_DIR)/libmclbn384_256.a +BN384_256_SLIB=$(LIB_DIR)/lib$(BN384_256_SNAME).$(LIB_SUF) +BN512_LIB=$(LIB_DIR)/libmclbn512.a +BN512_SLIB=$(LIB_DIR)/lib$(BN512_SNAME).$(LIB_SUF) +SHE256_LIB=$(LIB_DIR)/libmclshe256.a +SHE384_LIB=$(LIB_DIR)/libmclshe384.a +ECDSA_LIB=$(LIB_DIR)/libmclecdsa.a +all: $(MCL_LIB) $(MCL_SLIB) $(BN256_LIB) $(BN256_SLIB) $(BN384_LIB) $(BN384_SLIB) $(BN384_256_LIB) $(BN384_256_SLIB) $(BN512_LIB) $(BN512_SLIB) $(SHE256_LIB) $(SHE384_lib) $(ECDSA_LIB) + +#LLVM_VER=-3.8 +LLVM_LLC=llc$(LLVM_VER) +LLVM_OPT=opt$(LLVM_VER) +LLVM_OPT_VERSION=$(shell $(LLVM_OPT) --version 2>/dev/null | awk '/version/ {print $$3}') +GEN_EXE=src/gen +# incompatibility between llvm 3.4 and the later version +ifneq ($(LLVM_OPT_VERSION),) +ifeq ($(shell expr $(LLVM_OPT_VERSION) \< 3.5.0),1) + GEN_EXE_OPT=-old +endif +endif +ifeq ($(OS),mac) + 
ASM_SRC_PATH_NAME=src/asm/$(CPU)mac +else + ASM_SRC_PATH_NAME=src/asm/$(CPU) +endif +ifneq ($(CPU),) + ASM_SRC=$(ASM_SRC_PATH_NAME).s +endif +ASM_OBJ=$(OBJ_DIR)/$(CPU).o +LIB_OBJ=$(OBJ_DIR)/fp.o +BN256_OBJ=$(OBJ_DIR)/bn_c256.o +BN384_OBJ=$(OBJ_DIR)/bn_c384.o +BN384_256_OBJ=$(OBJ_DIR)/bn_c384_256.o +BN512_OBJ=$(OBJ_DIR)/bn_c512.o +SHE256_OBJ=$(OBJ_DIR)/she_c256.o +SHE384_OBJ=$(OBJ_DIR)/she_c384.o +ECDSA_OBJ=$(OBJ_DIR)/ecdsa_c.o +FUNC_LIST=src/func.list +ifeq ($(findstring $(OS),mingw64/cygwin),) + MCL_USE_LLVM?=1 +else + MCL_USE_LLVM=0 +endif +ifeq ($(MCL_USE_LLVM),1) + CFLAGS+=-DMCL_USE_LLVM=1 + LIB_OBJ+=$(ASM_OBJ) + # special case for intel with bmi2 + ifeq ($(INTEL),1) + LIB_OBJ+=$(OBJ_DIR)/$(CPU).bmi2.o + endif +endif +LLVM_SRC=src/base$(BIT).ll + +# CPU is used for llvm +# see $(LLVM_LLC) --version +LLVM_FLAGS=-march=$(CPU) -relocation-model=pic #-misched=ilpmax +LLVM_FLAGS+=-pre-RA-sched=list-ilp -max-sched-reorder=128 -mattr=-sse + +#HAS_BMI2=$(shell cat "/proc/cpuinfo" | grep bmi2 >/dev/null && echo "1") +#ifeq ($(HAS_BMI2),1) +# LLVM_FLAGS+=-mattr=bmi2 +#endif + +ifeq ($(USE_LOW_ASM),1) + LOW_ASM_OBJ=$(LOW_ASM_SRC:.asm=.o) + LIB_OBJ+=$(LOW_ASM_OBJ) +endif + +ifeq ($(UPDATE_ASM),1) + ASM_SRC_DEP=$(LLVM_SRC) + ASM_BMI2_SRC_DEP=src/base$(BIT).bmi2.ll +else + ASM_SRC_DEP= + ASM_BMI2_SRC_DEP= +endif + +ifneq ($(findstring $(OS),mac/mingw64),) + BN256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib + BN384_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib + BN384_256_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib + BN512_SLIB_LDFLAGS+=-l$(MCL_SNAME) -L./lib +endif +ifeq ($(OS),mingw64) + MCL_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(MCL_SNAME).a + BN256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN256_SNAME).a + BN384_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN384_SNAME).a + BN384_256_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN384_256_SNAME).a + BN512_SLIB_LDFLAGS+=-Wl,--out-implib,$(LIB_DIR)/lib$(BN512_SNAME).a +endif + +$(MCL_LIB): $(LIB_OBJ) + $(AR) $@ $(LIB_OBJ) + +$(MCL_SLIB): $(LIB_OBJ) + $(PRE)$(CXX) -o $@ $(LIB_OBJ) -shared $(LDFLAGS) $(MCL_SLIB_LDFLAGS) + +$(BN256_LIB): $(BN256_OBJ) + $(AR) $@ $(BN256_OBJ) + +$(SHE256_LIB): $(SHE256_OBJ) + $(AR) $@ $(SHE256_OBJ) + +$(SHE384_LIB): $(SHE384_OBJ) + $(AR) $@ $(SHE384_OBJ) + +$(ECDSA_LIB): $(ECDSA_OBJ) + $(AR) $@ $(ECDSA_OBJ) + +$(BN256_SLIB): $(BN256_OBJ) $(MCL_SLIB) + $(PRE)$(CXX) -o $@ $(BN256_OBJ) -shared $(LDFLAGS) $(BN256_SLIB_LDFLAGS) + +$(BN384_LIB): $(BN384_OBJ) + $(AR) $@ $(BN384_OBJ) + +$(BN384_256_LIB): $(BN384_256_OBJ) + $(AR) $@ $(BN384_256_OBJ) + +$(BN512_LIB): $(BN512_OBJ) + $(AR) $@ $(BN512_OBJ) + +$(BN384_SLIB): $(BN384_OBJ) $(MCL_SLIB) + $(PRE)$(CXX) -o $@ $(BN384_OBJ) -shared $(LDFLAGS) $(BN384_SLIB_LDFLAGS) + +$(BN384_256_SLIB): $(BN384_256_OBJ) $(MCL_SLIB) + $(PRE)$(CXX) -o $@ $(BN384_256_OBJ) -shared $(LDFLAGS) $(BN384_256_SLIB_LDFLAGS) + +$(BN512_SLIB): $(BN512_OBJ) $(MCL_SLIB) + $(PRE)$(CXX) -o $@ $(BN512_OBJ) -shared $(LDFLAGS) $(BN512_SLIB_LDFLAGS) + +$(ASM_OBJ): $(ASM_SRC) + $(PRE)$(CXX) -c $< -o $@ $(CFLAGS) + +$(ASM_SRC): $(ASM_SRC_DEP) + $(LLVM_OPT) -O3 -o - $< -march=$(CPU) | $(LLVM_LLC) -O3 -o $@ $(LLVM_FLAGS) + +$(LLVM_SRC): $(GEN_EXE) $(FUNC_LIST) + $(GEN_EXE) $(GEN_EXE_OPT) -f $(FUNC_LIST) > $@ + +$(ASM_SRC_PATH_NAME).bmi2.s: $(ASM_BMI2_SRC_DEP) + $(LLVM_OPT) -O3 -o - $< -march=$(CPU) | $(LLVM_LLC) -O3 -o $@ $(LLVM_FLAGS) -mattr=bmi2 + +$(OBJ_DIR)/$(CPU).bmi2.o: $(ASM_SRC_PATH_NAME).bmi2.s + $(PRE)$(CXX) -c $< -o $@ $(CFLAGS) + +src/base$(BIT).bmi2.ll: $(GEN_EXE) + $(GEN_EXE) $(GEN_EXE_OPT) -f $(FUNC_LIST) -s 
bmi2 > $@ + +src/base64m.ll: $(GEN_EXE) + $(GEN_EXE) $(GEN_EXE_OPT) -wasm > $@ + +$(FUNC_LIST): $(LOW_ASM_SRC) +ifeq ($(USE_LOW_ASM),1) + $(shell awk '/global/ { print $$2}' $(LOW_ASM_SRC) > $(FUNC_LIST)) + $(shell awk '/proc/ { print $$2}' $(LOW_ASM_SRC) >> $(FUNC_LIST)) +else + $(shell touch $(FUNC_LIST)) +endif + +$(GEN_EXE): src/gen.cpp src/llvm_gen.hpp + $(CXX) -o $@ $< $(CFLAGS) -O0 + +asm: $(LLVM_SRC) + $(LLVM_OPT) -O3 -o - $(LLVM_SRC) | $(LLVM_LLC) -O3 $(LLVM_FLAGS) -x86-asm-syntax=intel + +$(LOW_ASM_OBJ): $(LOW_ASM_SRC) + $(ASM) $< + +ifeq ($(OS),mac) + MAC_GO_LDFLAGS="-ldflags=-s" +endif +# set PATH for mingw, set LD_RUN_PATH is for other env +test_go256: $(MCL_SLIB) $(BN256_SLIB) +# cd ffi/go/mcl && env PATH="$$PATH:../../../lib" LD_RUN_PATH="../../../lib" CGO_LDFLAGS="-L../../../lib -l$(BN256_SNAME) -l$(MCL_SNAME) -lgmpxx -lgmp -lcrypto -lstdc++" go test $(MAC_GO_LDFLAGS) -tags bn256 . + cd ffi/go/mcl && env PATH="$$PATH:../../../lib" LD_RUN_PATH="../../../lib" go test $(MAC_GO_LDFLAGS) -tags bn256 . + +test_go384: $(MCL_SLIB) $(BN384_SLIB) +# cd ffi/go/mcl && env LD_RUN_PATH="../../../lib" CGO_CFLAGS="-I../../../include" CGO_LDFLAGS="-L../../../lib -l$(BN384_SNAME) -l$(MCL_SNAME) -lgmpxx -lgmp -lcrypto -lstdc++" go test $(MAC_GO_LDFLAGS) . + cd ffi/go/mcl && env PATH="$$PATH:../../../lib" LD_RUN_PATH="../../../lib" go test $(MAC_GO_LDFLAGS) -tags bn384 . + +test_go: + $(MAKE) test_go256 + $(MAKE) test_go384 + +test_java: + $(MAKE) -C ffi/java test + +################################################################## + +VPATH=test sample src + +.SUFFIXES: .cpp .d .exe .c .o + +$(OBJ_DIR)/%.o: %.cpp + $(PRE)$(CXX) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) + +$(OBJ_DIR)/%.o: %.c + $(PRE)$(CC) $(CFLAGS) -c $< -o $@ -MMD -MP -MF $(@:.o=.d) + +$(EXE_DIR)/%.exe: $(OBJ_DIR)/%.o $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/bn_c256_test.exe: $(OBJ_DIR)/bn_c256_test.o $(BN256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BN256_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/bn_c384_test.exe: $(OBJ_DIR)/bn_c384_test.o $(BN384_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BN384_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/bn_c384_256_test.exe: $(OBJ_DIR)/bn_c384_256_test.o $(BN384_256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BN384_256_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/bn_c512_test.exe: $(OBJ_DIR)/bn_c512_test.o $(BN512_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(BN512_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/pairing_c.exe: $(OBJ_DIR)/pairing_c.o $(BN256_LIB) $(MCL_LIB) + $(PRE)$(CC) $< -o $@ $(BN256_LIB) $(MCL_LIB) $(LDFLAGS) -lstdc++ + +$(EXE_DIR)/she_c256_test.exe: $(OBJ_DIR)/she_c256_test.o $(SHE256_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(SHE256_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/she_c384_test.exe: $(OBJ_DIR)/she_c384_test.o $(SHE384_LIB) $(MCL_LIB) + $(PRE)$(CXX) $< -o $@ $(SHE384_LIB) $(MCL_LIB) $(LDFLAGS) + +$(EXE_DIR)/ecdsa_c_test.exe: $(OBJ_DIR)/ecdsa_c_test.o $(ECDSA_LIB) $(MCL_LIB) src/ecdsa_c.cpp include/mcl/ecdsa.hpp include/mcl/ecdsa.h + $(PRE)$(CXX) $< -o $@ $(ECDSA_LIB) $(MCL_LIB) $(LDFLAGS) + +SAMPLE_EXE=$(addprefix $(EXE_DIR)/,$(addsuffix .exe,$(basename $(SAMPLE_SRC)))) +sample: $(SAMPLE_EXE) $(MCL_LIB) + +TEST_EXE=$(addprefix $(EXE_DIR)/,$(TEST_SRC:.cpp=.exe)) +test: $(TEST_EXE) + @echo test $(TEST_EXE) + @sh -ec 'for i in $(TEST_EXE); do $$i|grep "ctest:name"; done' > result.txt + @grep -v "ng=0, exception=0" result.txt; if [ $$? 
-eq 1 ]; then echo "all unit tests succeed"; else exit 1; fi + +EMCC_OPT=-I./include -I./src -Wall -Wextra +EMCC_OPT+=-O3 -DNDEBUG -DMCLSHE_WIN_SIZE=8 +EMCC_OPT+=-s WASM=1 -s NO_EXIT_RUNTIME=1 -s MODULARIZE=1 #-s ASSERTIONS=1 +EMCC_OPT+=-DCYBOZU_MINIMUM_EXCEPTION +EMCC_OPT+=-s ABORTING_MALLOC=0 +SHE_C_DEP=src/fp.cpp src/she_c_impl.hpp include/mcl/she.hpp include/mcl/she.h Makefile +MCL_C_DEP=src/fp.cpp src/bn_c_impl.hpp include/mcl/bn.hpp include/mcl/bn.h Makefile +ifeq ($(MCL_USE_LLVM),2) + EMCC_OPT+=src/base64m.ll -DMCL_USE_LLVM + SHE_C_DEP+=src/base64m.ll +endif +../she-wasm/she_c.js: src/she_c256.cpp $(SHE_C_DEP) + emcc -o $@ src/fp.cpp src/she_c256.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -s TOTAL_MEMORY=67108864 -s DISABLE_EXCEPTION_CATCHING=0 + +../she-wasm/she_c384.js: src/she_c384.cpp $(SHE_C_DEP) + emcc -o $@ src/fp.cpp src/she_c384.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=384 -s TOTAL_MEMORY=67108864 -s DISABLE_EXCEPTION_CATCHING=0 + +../mcl-wasm/mcl_c.js: src/bn_c256.cpp $(MCL_C_DEP) + emcc -o $@ src/fp.cpp src/bn_c256.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions -MD -MP -MF obj/mcl_c.d + +../mcl-wasm/mcl_c512.js: src/bn_c512.cpp $(MCL_C_DEP) + emcc -o $@ src/fp.cpp src/bn_c512.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=512 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions + +../ecdsa-wasm/ecdsa_c.js: src/ecdsa_c.cpp src/fp.cpp include/mcl/ecdsa.hpp include/mcl/ecdsa.h Makefile + emcc -o $@ src/fp.cpp src/ecdsa_c.cpp $(EMCC_OPT) -DMCL_MAX_BIT_SIZE=256 -DMCL_USE_WEB_CRYPTO_API -s DISABLE_EXCEPTION_CATCHING=1 -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -fno-exceptions + +mcl-wasm: + $(MAKE) ../mcl-wasm/mcl_c.js + $(MAKE) ../mcl-wasm/mcl_c512.js + +she-wasm: + $(MAKE) ../she-wasm/she_c.js + $(MAKE) ../she-wasm/she_c384.js + +ecdsa-wasm: + $(MAKE) ../ecdsa-wasm/ecdsa_c.js + +# test +bin/emu: + $(CXX) -g -o $@ src/fp.cpp src/bn_c256.cpp test/bn_c256_test.cpp -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_64BIT_PORTABLE -DMCL_VINT_FIXED_BUFFER -DMCL_MAX_BIT_SIZE=256 -I./include +bin/pairing_c_min.exe: sample/pairing_c.c include/mcl/vint.hpp src/fp.cpp include/mcl/bn.hpp +# $(CXX) -o $@ sample/pairing_c.c src/fp.cpp src/bn_c256.cpp -O2 -g -I./include -fno-exceptions -fno-rtti -fno-threadsafe-statics -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -DCYBOZU_DONT_USE_EXCEPTION -DCYBOZU_DONT_USE_STRING -DMCL_DONT_USE_CSPRNG -DMCL_MAX_BIT_SIZE=256 -DMCL_VINT_64BIT_PORTABLE -DNDEBUG -pg + $(CXX) -o $@ sample/pairing_c.c src/fp.cpp src/bn_c256.cpp -O2 -g -I./include -fno-threadsafe-statics -DMCL_DONT_USE_XBYAK -DMCL_DONT_USE_OPENSSL -DMCL_USE_VINT -DMCL_SIZEOF_UNIT=8 -DMCL_VINT_FIXED_BUFFER -DMCL_DONT_USE_CSPRNG -DMCL_MAX_BIT_SIZE=256 -DMCL_VINT_64BIT_PORTABLE -DNDEBUG + +make_tbl: + $(MAKE) ../bls/src/qcoeff-bn254.hpp + +../bls/src/qcoeff-bn254.hpp: $(MCL_LIB) misc/precompute.cpp + $(CXX) -o misc/precompute misc/precompute.cpp $(CFLAGS) $(MCL_LIB) $(LDFLAGS) + ./misc/precompute > ../bls/src/qcoeff-bn254.hpp + +update_xbyak: + cp -a ../xbyak/xbyak/xbyak.h ../xbyak/xbyak/xbyak_util.h ../xbyak/xbyak/xbyak_mnemonic.h src/xbyak/ + + +clean: + $(RM) $(LIB_DIR)/*.a $(EXE_DIR)/*.$(LIB_SUF) $(OBJ_DIR)/*.o $(OBJ_DIR)/*.d $(EXE_DIR)/*.exe $(GEN_EXE) $(ASM_OBJ) $(LIB_OBJ) $(BN256_OBJ) $(BN384_OBJ) $(BN512_OBJ) 
$(LLVM_SRC) $(FUNC_LIST) src/*.ll lib/*.a + +ALL_SRC=$(SRC_SRC) $(TEST_SRC) $(SAMPLE_SRC) +DEPEND_FILE=$(addprefix $(OBJ_DIR)/, $(addsuffix .d,$(basename $(ALL_SRC)))) +-include $(DEPEND_FILE) + +PREFIX?=/usr/local +install: lib/libmcl.a lib/libmcl$(SHARE_BASENAME_SUF).$(LIB_SUF) + $(MKDIR) $(PREFIX)/include/mcl + cp -a include/mcl/ $(PREFIX)/include/ + cp -a include/cybozu/ $(PREFIX)/include/ + $(MKDIR) $(PREFIX)/lib + cp -a lib/libmcl.a lib/libmcl$(SHARE_BASENAME_SUF).$(LIB_SUF) $(PREFIX)/lib/ + +.PHONY: test mcl-wasm she-wasm bin/emu + +# don't remove these files automatically +.SECONDARY: $(addprefix $(OBJ_DIR)/, $(ALL_SRC:.cpp=.o)) + diff --git a/vendor/github.com/dexon-foundation/mcl/bench.txt b/vendor/github.com/dexon-foundation/mcl/bench.txt new file mode 100644 index 000000000..35e47dca5 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/bench.txt @@ -0,0 +1,114 @@ +----------------------------------------------------------------------------- +Core i7-7700@3.6GHz Firefox 58.0.1(64-bit) + BN254 Fp381_1 Fp462 +op msec +Fr::setByCSPRNG 0.022 0.021 0.019 +pairing 2.446 7.353 14.596 +millerLoop 1.467 4.219 8.598 +finalExp 0.97 3.127 6.005 +precomputedMillerLoop 1.087 3.171 6.305 +G1::add 0.007 0.01 0.014 +G1::dbl 0.005 0.007 0.011 +G1::mul 0.479 1.529 3.346 +G2::add 0.013 0.022 0.033 +G2::dbl 0.01 0.016 0.025 +G2::mul 0.989 2.955 5.921 +hashAndMapToG1 0.135 0.309 0.76 +hashAndMapToG2 2.14 6.44 14.249 +Fr::add 0.004 0.003 0.003 +Fr::mul 0.004 0.004 0.005 +Fr::sqr 0.003 0.003 0.004 +Fr::inv 0.025 0.038 0.05 +GT::add 0.005 0.004 0.005 +GT::mul 0.016 0.027 0.041 +GT::sqr 0.012 0.018 0.028 +GT::inv 0.051 0.081 0.122 + +----------------------------------------------------------------------------- +iPhone7 iOS 11.2.1 Safari/604.1 + BN254 Fp381_1 Fp462 +op msec +Fr::setByCSPRNG 0.041 0.038 0.154 +pairing 3.9 11.752 22.578 +millerLoop 2.29 6.55 13.067 +finalExp 1.501 4.741 9.016 +precomputedMillerLoop 1.675 4.818 9.492 +G1::add 0.006 0.015 0.018 +G1::dbl 0.005 0.01 0.019 +G1::mul 0.843 2.615 5.339 +G2::add 0.015 0.03 0.048 +G2::dbl 0.011 0.022 0.034 +G2::mul 1.596 4.581 9.077 +hashAndMapToG1 0.212 0.507 1.201 +hashAndMapToG2 3.486 9.93 21.817 +Fr::add 0.002 0.002 0.002 +Fr::mul 0.002 0.003 0.003 +Fr::sqr 0.002 0.003 0.004 +Fr::inv 0.037 0.062 0.078 +GT::add 0.003 0.003 0.003 +GT::mul 0.021 0.037 0.058 +GT::sqr 0.014 0.026 0.04 +GT::inv 0.074 0.131 0.19 +----------------------------------------------------------------------------- +Core i7-7700@3.6GHz Linux gcc.5.4.0 + + BN254 Fp381_1 Fp462 +G1::mulCT 202.807Kclk 597.410Kclk 1.658Mclk +G1::mulCTsmall 200.968Kclk 596.074Kclk 1.650Mclk +G1::mul 185.935Kclk 555.147Kclk 1.495Mclk +G1::mulsmall 1.856Kclk 3.740Kclk 8.054Kclk +G1::add 866.89 clk 1.710Kclk 3.663Kclk +G1::dbl 798.60 clk 1.770Kclk 3.755Kclk +G2::mulCT 391.655Kclk 1.351Mclk 3.102Mclk +G2::mulCTsmall 369.134Kclk 1.358Mclk 3.105Mclk +G2::mul 400.098Kclk 1.277Mclk 3.009Mclk +G2::mulsmall 5.774Kclk 12.806Kclk 25.374Kclk +G2::add 2.696Kclk 7.547Kclk 14.683Kclk +G2::dbl 2.600Kclk 5.366Kclk 10.436Kclk +GT::pow 727.157Kclk 1.991Mclk 4.364Mclk +hashAndMapToG1 27.953Kclk 87.291Kclk 200.972Kclk +hashAndMapToG2 775.186Kclk 2.629Mclk 6.937Mclk +Fp::add 11.48 clk 69.54 clk 21.36 clk +Fp::mul 63.11 clk 134.90 clk 303.75 clk +Fp::sqr 64.39 clk 134.29 clk 305.38 clk +Fp::inv 2.302Kclk 4.185Kclk 5.485Kclk +GT::add 180.93 clk 247.70 clk 256.55 clk +GT::mul 5.278Kclk 10.887Kclk 19.844Kclk +GT::sqr 3.666Kclk 7.444Kclk 13.694Kclk +GT::inv 11.322Kclk 22.480Kclk 41.796Kclk +pairing 1.044Mclk 3.445Mclk 
7.789Mclk +millerLoop 634.214Kclk 1.913Mclk 4.466Mclk +finalExp 423.413Kclk 1.535Mclk 3.328Mclk +precomputedML 479.849Kclk 1.461Mclk 3.299Mclk +----------------------------------------------------------------------------- + +1.2GHz ARM Cortex-A53 [HiKey] Linux gcc 4.9.2 + + BN254 Fp381_1 Fp462 +G1::mulCT 858.149usec 2.780msec 8.507msec +G1::mulCTsmall 854.535usec 2.773msec 8.499msec +G1::mul 743.100usec 2.484msec 7.536msec +G1::mulsmall 7.680usec 16.528usec 41.818usec +G1::add 3.347usec 7.363usec 18.544usec +G1::dbl 3.294usec 7.351usec 18.472usec +G2::mulCT 1.627msec 5.083msec 12.142msec +G2::mulCTsmall 1.534msec 5.124msec 12.125msec +G2::mul 1.677msec 4.806msec 11.757msec +G2::mulsmall 23.581usec 48.504usec 96.780usec +G2::add 10.751usec 27.759usec 54.392usec +G2::dbl 10.076usec 20.625usec 42.032usec +GT::pow 2.662msec 7.091msec 14.042msec +hashAndMapToG1 111.256usec 372.665usec 1.031msec +hashAndMapToG2 3.199msec 10.168msec 27.391msec +Fp::add 27.19nsec 38.02nsec 45.68nsec +Fp::mul 279.17nsec 628.44nsec 1.662usec +Fp::sqr 276.56nsec 651.67nsec 1.675usec +Fp::inv 9.743usec 14.364usec 18.116usec +GT::add 373.18nsec 530.62nsec 625.26nsec +GT::mul 19.557usec 38.623usec 63.111usec +GT::sqr 13.345usec 26.218usec 43.008usec +GT::inv 44.119usec 84.581usec 153.046usec +pairing 3.913msec 12.606msec 26.818msec +millerLoop 2.402msec 7.202msec 15.711msec +finalExp 1.506msec 5.395msec 11.098msec +precomputedML 1.815msec 5.447msec 11.094msec diff --git a/vendor/github.com/dexon-foundation/mcl/common.mk b/vendor/github.com/dexon-foundation/mcl/common.mk new file mode 100644 index 000000000..a05f5c9cb --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/common.mk @@ -0,0 +1,117 @@ +GCC_VER=$(shell $(PRE)$(CC) -dumpversion) +UNAME_S=$(shell uname -s) +ifeq ($(UNAME_S),Linux) + OS=Linux +endif +ifeq ($(findstring MINGW64,$(UNAME_S)),MINGW64) + OS=mingw64 + CFLAGS+=-D__USE_MINGW_ANSI_STDIO=1 +endif +ifeq ($(findstring CYGWIN,$(UNAME_S)),CYGWIN) + OS=cygwin +endif +ifeq ($(UNAME_S),Darwin) + OS=mac + ARCH=x86_64 + LIB_SUF=dylib + OPENSSL_DIR?=/usr/local/opt/openssl + CFLAGS+=-I$(OPENSSL_DIR)/include + LDFLAGS+=-L$(OPENSSL_DIR)/lib + GMP_DIR?=/usr/local/opt/gmp + CFLAGS+=-I$(GMP_DIR)/include + LDFLAGS+=-L$(GMP_DIR)/lib +else + LIB_SUF=so +endif +ARCH?=$(shell uname -m) +ifneq ($(findstring $(ARCH),x86_64/amd64),) + CPU=x86-64 + INTEL=1 + ifeq ($(findstring $(OS),mingw64/cygwin),) + GCC_EXT=1 + endif + BIT=64 + BIT_OPT=-m64 + #LOW_ASM_SRC=src/asm/low_x86-64.asm + #ASM=nasm -felf64 +endif +ifeq ($(ARCH),x86) + CPU=x86 + INTEL=1 + BIT=32 + BIT_OPT=-m32 + #LOW_ASM_SRC=src/asm/low_x86.asm +endif +ifeq ($(ARCH),armv7l) + CPU=arm + BIT=32 + #LOW_ASM_SRC=src/asm/low_arm.s +endif +ifeq ($(ARCH),aarch64) + CPU=aarch64 + BIT=64 +endif +ifeq ($(findstring $(OS),mac/mingw64),) + LDFLAGS+=-lrt +endif + +CP=cp -f +AR=ar r +MKDIR=mkdir -p +RM=rm -rf + +ifeq ($(DEBUG),1) + ifeq ($(GCC_EXT),1) + CFLAGS+=-fsanitize=address + LDFLAGS+=-fsanitize=address + endif +else + CFLAGS_OPT+=-fomit-frame-pointer -DNDEBUG + ifeq ($(CXX),clang++) + CFLAGS_OPT+=-O3 + else + ifeq ($(shell expr $(GCC_VER) \> 4.6.0),1) + CFLAGS_OPT+=-Ofast + else + CFLAGS_OPT+=-O3 + endif + endif + ifeq ($(MARCH),) + ifeq ($(INTEL),1) + CFLAGS_OPT+=-march=native + endif + else + CFLAGS_OPT+=$(MARCH) + endif +endif +CFLAGS_WARN=-Wall -Wextra -Wformat=2 -Wcast-qual -Wcast-align -Wwrite-strings -Wfloat-equal -Wpointer-arith +CFLAGS+=-g3 +INC_OPT=-I include -I test +CFLAGS+=$(CFLAGS_WARN) $(BIT_OPT) $(INC_OPT) +DEBUG=0 +CFLAGS_OPT_USER?=$(CFLAGS_OPT) +ifeq 
($(DEBUG),0) +CFLAGS+=$(CFLAGS_OPT_USER) +endif +CFLAGS+=$(CFLAGS_USER) +MCL_USE_GMP?=1 +MCL_USE_OPENSSL?=1 +ifeq ($(MCL_USE_GMP),0) + CFLAGS+=-DMCL_USE_VINT +endif +ifneq ($(MCL_SIZEOF_UNIT),) + CFLAGS+=-DMCL_SIZEOF_UNIT=$(MCL_SIZEOF_UNIT) +endif +ifeq ($(MCL_USE_OPENSSL),0) + CFLAGS+=-DMCL_DONT_USE_OPENSSL +endif +ifeq ($(MCL_USE_GMP),1) + GMP_LIB=-lgmp -lgmpxx +endif +ifeq ($(MCL_USE_OPENSSL),1) + OPENSSL_LIB=-lcrypto +endif +LDFLAGS+=$(GMP_LIB) $(OPENSSL_LIB) $(BIT_OPT) $(LDFLAGS_USER) + +CFLAGS+=-fPIC + diff --git a/vendor/github.com/dexon-foundation/mcl/common.props b/vendor/github.com/dexon-foundation/mcl/common.props new file mode 100644 index 000000000..912f39e30 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/common.props @@ -0,0 +1,26 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ImportGroup Label="PropertySheets" /> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup> + <OutDir>$(SolutionDir)bin\</OutDir> + </PropertyGroup> + <ItemDefinitionGroup> + <ClCompile> + <AdditionalIncludeDirectories>$(SolutionDir)../cybozulib/include;$(SolutionDir)../cybozulib_ext/include;$(SolutionDir)include;$(SolutionDir)../xbyak</AdditionalIncludeDirectories> + </ClCompile> + </ItemDefinitionGroup> + <ItemDefinitionGroup> + <ClCompile> + <WarningLevel>Level4</WarningLevel> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <PrecompiledHeaderFile /> + <PrecompiledHeaderOutputFile /> + <PreprocessorDefinitions>_MBCS;%(PreprocessorDefinitions);NOMINMAX</PreprocessorDefinitions> + </ClCompile> + <Link> + <AdditionalLibraryDirectories>$(SolutionDir)../cybozulib_ext/lib;$(SolutionDir)lib</AdditionalLibraryDirectories> + </Link> + </ItemDefinitionGroup> + <ItemGroup /> +</Project> diff --git a/vendor/github.com/dexon-foundation/mcl/debug.props b/vendor/github.com/dexon-foundation/mcl/debug.props new file mode 100644 index 000000000..1553ae0dc --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/debug.props @@ -0,0 +1,14 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ImportGroup Label="PropertySheets" /> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup> + <TargetName>$(ProjectName)d</TargetName> + </PropertyGroup> + <ItemDefinitionGroup> + <ClCompile> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + </ItemDefinitionGroup> + <ItemGroup /> +</Project>
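The common.mk fragment above is the switchboard for the vendored mcl build: MCL_USE_GMP and MCL_USE_OPENSSL select the external GMP/OpenSSL backends or fall back to the built-in ones (-DMCL_USE_VINT, -DMCL_DONT_USE_OPENSSL, and no -lgmp/-lgmpxx/-lcrypto at link time), while DEBUG=1 swaps the -Ofast/-march=native optimisation flags for an address-sanitized build where GCC extensions are enabled. A minimal sketch of how these variables might be passed on the command line; the lib/libmcl.a target name is taken from the install rule shown earlier and the directory comes from the diff paths, but the exact invocation is an assumption, not something stated in this diff:

    # portable build: built-in vint integer backend, no libgmp/libcrypto link dependency
    make -C vendor/github.com/dexon-foundation/mcl lib/libmcl.a MCL_USE_GMP=0 MCL_USE_OPENSSL=0

    # debug build: adds -fsanitize=address to CFLAGS/LDFLAGS (gcc/clang on x86-64)
    make -C vendor/github.com/dexon-foundation/mcl lib/libmcl.a DEBUG=1

Command-line assignments override the plain DEBUG=0 default inside common.mk, so these overrides take effect without editing the file.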
\ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/array.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/array.hpp new file mode 100644 index 000000000..30df3667d --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/array.hpp @@ -0,0 +1,197 @@ +#pragma once + +/** + @file + @brief scoped array and aligned array + + @author MITSUNARI Shigeo(@herumi) +*/ +#include <new> +#include <utility> +#ifdef _WIN32 + #include <malloc.h> +#else + #include <stdlib.h> +#endif +#include <cybozu/inttype.hpp> + +namespace cybozu { + +inline void *AlignedMalloc(size_t size, size_t alignment) +{ +#ifdef _WIN32 + return _aligned_malloc(size, alignment); +#else + void *p; + int ret = posix_memalign(&p, alignment, size); + return (ret == 0) ? p : 0; +#endif +} + +inline void AlignedFree(void *p) +{ +#ifdef _WIN32 + if (p == 0) return; + _aligned_free(p); +#else + free(p); +#endif +} + +template<class T> +class ScopedArray { + T *p_; + size_t size_; + ScopedArray(const ScopedArray&); + void operator=(const ScopedArray&); +public: + explicit ScopedArray(size_t size) + : p_(new T[size]) + , size_(size) + { + } + ~ScopedArray() + { + delete[] p_; + } + T& operator[](size_t idx) CYBOZU_NOEXCEPT { return p_[idx]; } + const T& operator[](size_t idx) const CYBOZU_NOEXCEPT { return p_[idx]; } + size_t size() const CYBOZU_NOEXCEPT { return size_; } + bool empty() const CYBOZU_NOEXCEPT { return size_ == 0; } + T* begin() CYBOZU_NOEXCEPT { return p_; } + T* end() CYBOZU_NOEXCEPT { return p_ + size_; } + const T* begin() const CYBOZU_NOEXCEPT { return p_; } + const T* end() const CYBOZU_NOEXCEPT { return p_ + size_; } + T* data() CYBOZU_NOEXCEPT { return p_; } + const T* data() const CYBOZU_NOEXCEPT { return p_; } +}; + +/** + T must be POD type + 16byte aligment array +*/ +template<class T, size_t N = 16, bool defaultDoClear = true> +class AlignedArray { + T *p_; + size_t size_; + size_t allocSize_; + T *alloc(size_t size) const + { + T *p = static_cast<T*>(AlignedMalloc(size * sizeof(T), N)); + if (p == 0) throw std::bad_alloc(); + return p; + } + void copy(T *dst, const T *src, size_t n) const + { + for (size_t i = 0; i < n; i++) dst[i] = src[i]; + } + void setZero(T *p, size_t n) const + { + for (size_t i = 0; i < n; i++) p[i] = 0; + } + /* + alloc allocN and copy [p, p + copyN) to new p_ + don't modify size_ + */ + void allocCopy(size_t allocN, const T *p, size_t copyN) + { + T *q = alloc(allocN); + copy(q, p, copyN); + AlignedFree(p_); + p_ = q; + allocSize_ = allocN; + } +public: + /* + don't clear buffer with zero if doClear is false + */ + explicit AlignedArray(size_t size = 0, bool doClear = defaultDoClear) + : p_(0) + , size_(0) + , allocSize_(0) + { + resize(size, doClear); + } + AlignedArray(const AlignedArray& rhs) + : p_(0) + , size_(0) + , allocSize_(0) + { + *this = rhs; + } + AlignedArray& operator=(const AlignedArray& rhs) + { + if (allocSize_ < rhs.size_) { + allocCopy(rhs.size_, rhs.p_, rhs.size_); + } else { + copy(p_, rhs.p_, rhs.size_); + } + size_ = rhs.size_; + return *this; + } +#if (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11) + AlignedArray(AlignedArray&& rhs) CYBOZU_NOEXCEPT + : p_(rhs.p_) + , size_(rhs.size_) + , allocSize_(rhs.allocSize_) + { + rhs.p_ = 0; + rhs.size_ = 0; + rhs.allocSize_ = 0; + } + AlignedArray& operator=(AlignedArray&& rhs) CYBOZU_NOEXCEPT + { + swap(rhs); + rhs.clear(); + return *this; + } +#endif + /* + don't clear buffer with zero if doClear is false + @note don't free if shrinked + */ + void 
resize(size_t size, bool doClear = defaultDoClear) + { + // shrink + if (size <= size_) { + size_ = size; + return; + } + // realloc if necessary + if (size > allocSize_) { + allocCopy(size, p_, size_); + } + if (doClear) setZero(p_ + size_, size - size_); + size_ = size; + } + void clear() // not free + { + size_ = 0; + } + ~AlignedArray() + { + AlignedFree(p_); + } + void swap(AlignedArray& rhs) CYBOZU_NOEXCEPT + { + std::swap(p_, rhs.p_); + std::swap(size_, rhs.size_); + std::swap(allocSize_, rhs.allocSize_); + } + T& operator[](size_t idx) CYBOZU_NOEXCEPT { return p_[idx]; } + const T& operator[](size_t idx) const CYBOZU_NOEXCEPT { return p_[idx]; } + size_t size() const CYBOZU_NOEXCEPT { return size_; } + bool empty() const CYBOZU_NOEXCEPT { return size_ == 0; } + T* begin() CYBOZU_NOEXCEPT { return p_; } + T* end() CYBOZU_NOEXCEPT { return p_ + size_; } + const T* begin() const CYBOZU_NOEXCEPT { return p_; } + const T* end() const CYBOZU_NOEXCEPT { return p_ + size_; } + T* data() CYBOZU_NOEXCEPT { return p_; } + const T* data() const CYBOZU_NOEXCEPT { return p_; } +#if (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11) + const T* cbegin() const CYBOZU_NOEXCEPT { return p_; } + const T* cend() const CYBOZU_NOEXCEPT { return p_ + size_; } +#endif +}; + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/atoi.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/atoi.hpp new file mode 100644 index 000000000..a22853a17 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/atoi.hpp @@ -0,0 +1,239 @@ +#pragma once +/** + @file + @brief converter between integer and string + + @author MITSUNARI Shigeo(@herumi) +*/ + +#include <memory.h> +#include <limits.h> +#include <limits> +#include <cybozu/exception.hpp> + +namespace cybozu { + +namespace atoi_local { + +template<typename T, size_t n> +T convertToInt(bool *b, const char *p, size_t size, const char (&max)[n], T min, T overflow1, char overflow2) +{ + if (size > 0 && *p) { + bool isMinus = false; + size_t i = 0; + if (*p == '-') { + isMinus = true; + i++; + } + if (i < size && p[i]) { + // skip leading zero + while (i < size && p[i] == '0') i++; + // check minimum + if (isMinus && size - i >= n - 1 && memcmp(max, &p[i], n - 1) == 0) { + if (b) *b = true; + return min; + } + T x = 0; + for (;;) { + unsigned char c; + if (i == size || (c = static_cast<unsigned char>(p[i])) == '\0') { + if (b) *b = true; + return isMinus ? 
-x : x; + } + unsigned int y = c - '0'; + if (y > 9 || x > overflow1 || (x == overflow1 && c >= overflow2)) { + break; + } + x = x * 10 + T(y); + i++; + } + } + } + if (b) { + *b = false; + return 0; + } else { + throw cybozu::Exception("atoi::convertToInt") << cybozu::exception::makeString(p, size); + } +} + +template<typename T> +T convertToUint(bool *b, const char *p, size_t size, T overflow1, char overflow2) +{ + if (size > 0 && *p) { + size_t i = 0; + // skip leading zero + while (i < size && p[i] == '0') i++; + T x = 0; + for (;;) { + unsigned char c; + if (i == size || (c = static_cast<unsigned char>(p[i])) == '\0') { + if (b) *b = true; + return x; + } + unsigned int y = c - '0'; + if (y > 9 || x > overflow1 || (x == overflow1 && c >= overflow2)) { + break; + } + x = x * 10 + T(y); + i++; + } + } + if (b) { + *b = false; + return 0; + } else { + throw cybozu::Exception("atoi::convertToUint") << cybozu::exception::makeString(p, size); + } +} + +template<typename T> +T convertHexToInt(bool *b, const char *p, size_t size) +{ + if (size > 0 && *p) { + size_t i = 0; + T x = 0; + for (;;) { + unsigned int c; + if (i == size || (c = static_cast<unsigned char>(p[i])) == '\0') { + if (b) *b = true; + return x; + } + if (c - 'A' <= 'F' - 'A') { + c = (c - 'A') + 10; + } else if (c - 'a' <= 'f' - 'a') { + c = (c - 'a') + 10; + } else if (c - '0' <= '9' - '0') { + c = c - '0'; + } else { + break; + } + // avoid overflow + if (x > (std::numeric_limits<T>::max)() / 16) break; + x = x * 16 + T(c); + i++; + } + } + if (b) { + *b = false; + return 0; + } else { + throw cybozu::Exception("atoi::convertHexToInt") << cybozu::exception::makeString(p, size); + } +} + +} // atoi_local + +/** + auto detect return value class + @note if you set bool pointer p then throw nothing and set *p = false if bad string +*/ +class atoi { + const char *p_; + size_t size_; + bool *b_; + void set(bool *b, const char *p, size_t size) + { + b_ = b; + p_ = p; + size_ = size; + } +public: + atoi(const char *p, size_t size = -1) + { + set(0, p, size); + } + atoi(bool *b, const char *p, size_t size = -1) + { + set(b, p, size); + } + atoi(const std::string& str) + { + set(0, str.c_str(), str.size()); + } + atoi(bool *b, const std::string& str) + { + set(b, str.c_str(), str.size()); + } + inline operator signed char() const + { + return atoi_local::convertToInt<signed char>(b_, p_, size_, "128", -128, 12, '8'); + } + inline operator unsigned char() const + { + return atoi_local::convertToUint<unsigned char>(b_, p_, size_, 25, '6'); + } + inline operator short() const + { + return atoi_local::convertToInt<short>(b_, p_, size_, "32768", -32768, 3276, '8'); + } + inline operator unsigned short() const + { + return atoi_local::convertToUint<unsigned short>(b_, p_, size_, 6553, '6'); + } + inline operator int() const + { + return atoi_local::convertToInt<int>(b_, p_, size_, "2147483648", INT_MIN, 214748364, '8'); + } + inline operator unsigned int() const + { + return atoi_local::convertToUint<unsigned int>(b_, p_, size_, 429496729, '6'); + } + inline operator long long() const + { + return atoi_local::convertToInt<long long>(b_, p_, size_, "9223372036854775808", LLONG_MIN, 922337203685477580LL, '8'); + } + inline operator unsigned long long() const + { + return atoi_local::convertToUint<unsigned long long>(b_, p_, size_, 1844674407370955161ULL, '6'); + } +#if defined(__SIZEOF_LONG__) && (__SIZEOF_LONG__ == 8) + inline operator long() const { return static_cast<long>(static_cast<long long>(*this)); } + inline operator unsigned 
long() const { return static_cast<unsigned long>(static_cast<unsigned long long>(*this)); } +#else + inline operator long() const { return static_cast<long>(static_cast<int>(*this)); } + inline operator unsigned long() const { return static_cast<unsigned long>(static_cast<unsigned int>(*this)); } +#endif +}; + +class hextoi { + const char *p_; + size_t size_; + bool *b_; + void set(bool *b, const char *p, size_t size) + { + b_ = b; + p_ = p; + size_ = size; + } +public: + hextoi(const char *p, size_t size = -1) + { + set(0, p, size); + } + hextoi(bool *b, const char *p, size_t size = -1) + { + set(b, p, size); + } + hextoi(const std::string& str) + { + set(0, str.c_str(), str.size()); + } + hextoi(bool *b, const std::string& str) + { + set(b, str.c_str(), str.size()); + } + operator unsigned char() const { return atoi_local::convertHexToInt<unsigned char>(b_, p_, size_); } + operator unsigned short() const { return atoi_local::convertHexToInt<unsigned short>(b_, p_, size_); } + operator unsigned int() const { return atoi_local::convertHexToInt<unsigned int>(b_, p_, size_); } + operator unsigned long() const { return atoi_local::convertHexToInt<unsigned long>(b_, p_, size_); } + operator unsigned long long() const { return atoi_local::convertHexToInt<unsigned long long>(b_, p_, size_); } + operator char() const { return atoi_local::convertHexToInt<char>(b_, p_, size_); } + operator signed char() const { return atoi_local::convertHexToInt<signed char>(b_, p_, size_); } + operator short() const { return atoi_local::convertHexToInt<short>(b_, p_, size_); } + operator int() const { return atoi_local::convertHexToInt<int>(b_, p_, size_); } + operator long() const { return atoi_local::convertHexToInt<long>(b_, p_, size_); } + operator long long() const { return atoi_local::convertHexToInt<long long>(b_, p_, size_); } +}; + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/benchmark.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/benchmark.hpp new file mode 100644 index 000000000..4c02f1869 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/benchmark.hpp @@ -0,0 +1,212 @@ +#pragma once +/** + @file + @brief measure exec time of function + @author MITSUNARI Shigeo +*/ +#if defined(_MSC_VER) && (MSC_VER <= 1500) + #include <cybozu/inttype.hpp> +#else + #include <stdint.h> +#endif +#include <stdio.h> + +#ifdef __EMSCRIPTEN__ + #define CYBOZU_BENCH_USE_GETTIMEOFDAY +#endif + +#ifdef CYBOZU_BENCH_USE_GETTIMEOFDAY + #include <sys/time.h> +#elif !defined(CYBOZU_BENCH_DONT_USE_RDTSC) + #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__x86_64__) + #define CYBOZU_BENCH_USE_RDTSC + #define CYBOZU_BENCH_USE_CPU_TIMER + #endif + #if defined(__GNUC__) && defined(__ARM_ARCH_7A__) +// #define CYBOZU_BENCH_USE_MRC +// #define CYBOZU_BENCH_USE_CPU_TIMER + #endif +#endif + + +#include <assert.h> +#include <time.h> +#ifdef _MSC_VER + #include <intrin.h> + #include <sys/timeb.h> +#else +#endif + +#ifndef CYBOZU_UNUSED + #ifdef __GNUC__ + #define CYBOZU_UNUSED __attribute__((unused)) + #else + #define CYBOZU_UNUSED + #endif +#endif + +namespace cybozu { + +namespace bench { + +static void (*g_putCallback)(double); + +static inline void setPutCallback(void (*f)(double)) +{ + g_putCallback = f; +} + +} // cybozu::bench + +class CpuClock { +public: + static inline uint64_t getCpuClk() + { +#ifdef CYBOZU_BENCH_USE_RDTSC +#ifdef _MSC_VER + return __rdtsc(); +#else + unsigned int eax, edx; + __asm__ volatile("rdtsc" : "=a"(eax), 
"=d"(edx)); + return ((uint64_t)edx << 32) | eax; +#endif +#elif defined(CYBOZU_BENCH_USE_MRC) + uint32_t clk; + __asm__ volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(clk)); + return clk; +#else +#ifdef _MSC_VER + struct _timeb timeb; + _ftime_s(&timeb); + return uint64_t(timeb.time) * 1000000000 + timeb.millitm * 1000000; +#elif defined(CYBOZU_BENCH_USE_GETTIMEOFDAY) + struct timeval tv; + int ret CYBOZU_UNUSED = gettimeofday(&tv, 0); + assert(ret == 0); + return uint64_t(tv.tv_sec) * 1000000000 + tv.tv_usec * 1000; +#else + struct timespec tp; + int ret CYBOZU_UNUSED = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp); + assert(ret == 0); + return uint64_t(tp.tv_sec) * 1000000000 + tp.tv_nsec; +#endif +#endif + } + CpuClock() + : clock_(0) + , count_(0) + { + } + void begin() + { + clock_ -= getCpuClk(); + } + void end() + { + clock_ += getCpuClk(); + count_++; + } + int getCount() const { return count_; } + uint64_t getClock() const { return clock_; } + void clear() { count_ = 0; clock_ = 0; } + void put(const char *msg = 0, int N = 1) const + { + double t = getClock() / double(getCount()) / N; + if (msg && *msg) printf("%s ", msg); + if (bench::g_putCallback) { + bench::g_putCallback(t); + return; + } +#ifdef CYBOZU_BENCH_USE_CPU_TIMER + if (t > 1e6) { + printf("%7.3fMclk", t * 1e-6); + } else if (t > 1e3) { + printf("%7.3fKclk", t * 1e-3); + } else { + printf("%6.2f clk", t); + } +#else + if (t > 1e6) { + printf("%7.3fmsec", t * 1e-6); + } else if (t > 1e3) { + printf("%7.3fusec", t * 1e-3); + } else { + printf("%6.2fnsec", t); + } +#endif + if (msg && *msg) printf("\n"); + } + // adhoc constatns for CYBOZU_BENCH +#ifdef CYBOZU_BENCH_USE_CPU_TIMER + static const int loopN1 = 1000; + static const int loopN2 = 100; + static const uint64_t maxClk = (uint64_t)1e8; +#else + static const int loopN1 = 100; + static const int loopN2 = 100; + static const uint64_t maxClk = (uint64_t)1e8; +#endif +private: + uint64_t clock_; + int count_; +}; + +namespace bench { + +static CpuClock g_clk; +static int CYBOZU_UNUSED g_loopNum; + +} // cybozu::bench +/* + loop counter is automatically determined + CYBOZU_BENCH(<msg>, <func>, <param1>, <param2>, ...); + if msg == "" then only set g_clk, g_loopNum +*/ +#define CYBOZU_BENCH(msg, func, ...) \ +{ \ + const uint64_t _cybozu_maxClk = cybozu::CpuClock::maxClk; \ + cybozu::CpuClock _cybozu_clk; \ + for (int _cybozu_i = 0; _cybozu_i < cybozu::CpuClock::loopN2; _cybozu_i++) { \ + _cybozu_clk.begin(); \ + for (int _cybozu_j = 0; _cybozu_j < cybozu::CpuClock::loopN1; _cybozu_j++) { func(__VA_ARGS__); } \ + _cybozu_clk.end(); \ + if (_cybozu_clk.getClock() > _cybozu_maxClk) break; \ + } \ + if (msg && *msg) _cybozu_clk.put(msg, cybozu::CpuClock::loopN1); \ + cybozu::bench::g_clk = _cybozu_clk; cybozu::bench::g_loopNum = cybozu::CpuClock::loopN1; \ +} + +/* + double clk; + CYBOZU_BENCH_T(clk, <func>, <param1>, <param2>, ...); + clk is set by CYBOZU_BENCH_T +*/ +#define CYBOZU_BENCH_T(clk, func, ...) 
\ +{ \ + const uint64_t _cybozu_maxClk = cybozu::CpuClock::maxClk; \ + cybozu::CpuClock _cybozu_clk; \ + for (int _cybozu_i = 0; _cybozu_i < cybozu::CpuClock::loopN2; _cybozu_i++) { \ + _cybozu_clk.begin(); \ + for (int _cybozu_j = 0; _cybozu_j < cybozu::CpuClock::loopN1; _cybozu_j++) { func(__VA_ARGS__); } \ + _cybozu_clk.end(); \ + if (_cybozu_clk.getClock() > _cybozu_maxClk) break; \ + } \ + clk = _cybozu_clk.getClock() / (double)_cybozu_clk.getCount() / cybozu::CpuClock::loopN1; \ +} + +/* + loop counter N is given + CYBOZU_BENCH_C(<msg>, <counter>, <func>, <param1>, <param2>, ...); + if msg == "" then only set g_clk, g_loopNum +*/ +#define CYBOZU_BENCH_C(msg, _N, func, ...) \ +{ \ + cybozu::CpuClock _cybozu_clk; \ + _cybozu_clk.begin(); \ + for (int _cybozu_j = 0; _cybozu_j < _N; _cybozu_j++) { func(__VA_ARGS__); } \ + _cybozu_clk.end(); \ + if (msg && *msg) _cybozu_clk.put(msg, _N); \ + cybozu::bench::g_clk = _cybozu_clk; cybozu::bench::g_loopNum = _N; \ +} + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/bit_operation.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/bit_operation.hpp new file mode 100644 index 000000000..865c1e47d --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/bit_operation.hpp @@ -0,0 +1,139 @@ +#pragma once +/** + @file + @brief bit operation +*/ +#include <assert.h> +#include <cybozu/inttype.hpp> + +#if (CYBOZU_HOST == CYBOZU_HOST_INTEL) + #if defined(_WIN32) + #include <intrin.h> + #elif defined(__linux__) || defined(__CYGWIN__) || defined(__clang__) + #include <x86intrin.h> + #elif defined(__GNUC__) + #include <emmintrin.h> + #endif +#endif + +namespace cybozu { + +namespace bit_op_local { + +template<bool equalTo8> +struct Tag {}; + +// sizeof(T) < 8 +template<> +struct Tag<false> { + template<class T> + static inline int bsf(T x) + { +#if defined(_MSC_VER) + unsigned long out; + _BitScanForward(&out, x); +#pragma warning(suppress: 6102) + return out; +#else + return __builtin_ctz(x); +#endif + } + template<class T> + static inline int bsr(T x) + { +#if defined(_MSC_VER) + unsigned long out; + _BitScanReverse(&out, x); +#pragma warning(suppress: 6102) + return out; +#else + return __builtin_clz(x) ^ 0x1f; +#endif + } +}; + +// sizeof(T) == 8 +template<> +struct Tag<true> { + template<class T> + static inline int bsf(T x) + { +#if defined(_MSC_VER) && defined(_WIN64) + unsigned long out; + _BitScanForward64(&out, x); +#pragma warning(suppress: 6102) + return out; +#elif defined(__x86_64__) + return __builtin_ctzll(x); +#else + const uint32_t L = uint32_t(x); + if (L) return Tag<false>::bsf(L); + const uint32_t H = uint32_t(x >> 32); + return Tag<false>::bsf(H) + 32; +#endif + } + template<class T> + static inline int bsr(T x) + { +#if defined(_MSC_VER) && defined(_WIN64) + unsigned long out; + _BitScanReverse64(&out, x); +#pragma warning(suppress: 6102) + return out; +#elif defined(__x86_64__) + return __builtin_clzll(x) ^ 0x3f; +#else + const uint32_t H = uint32_t(x >> 32); + if (H) return Tag<false>::bsr(H) + 32; + const uint32_t L = uint32_t(x); + return Tag<false>::bsr(L); +#endif + } +}; + +} // bit_op_local + +template<class T> +int bsf(T x) +{ + return bit_op_local::Tag<sizeof(T) == 8>::bsf(x); +} +template<class T> +int bsr(T x) +{ + return bit_op_local::Tag<sizeof(T) == 8>::bsr(x); +} + +template<class T> +uint64_t makeBitMask64(T x) +{ + assert(x < 64); + return (uint64_t(1) << x) - 1; +} + +template<class T> +uint32_t popcnt(T x); + +template<> +inline uint32_t 
popcnt<uint32_t>(uint32_t x) +{ +#if defined(_MSC_VER) + return static_cast<uint32_t>(_mm_popcnt_u32(x)); +#else + return static_cast<uint32_t>(__builtin_popcount(x)); +#endif +} + +template<> +inline uint32_t popcnt<uint64_t>(uint64_t x) +{ +#if defined(__x86_64__) + return static_cast<uint32_t>(__builtin_popcountll(x)); +#elif defined(_WIN64) + return static_cast<uint32_t>(_mm_popcnt_u64(x)); +#else + return popcnt<uint32_t>(static_cast<uint32_t>(x)) + popcnt<uint32_t>(static_cast<uint32_t>(x >> 32)); +#endif +} + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/critical_section.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/critical_section.hpp new file mode 100644 index 000000000..13d7f3a0e --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/critical_section.hpp @@ -0,0 +1,60 @@ +#pragma once +/** + @file + @brief critical section + + @author MITSUNARI Shigeo(@herumi) + @author MITSUNARI Shigeo +*/ +#include <cybozu/mutex.hpp> + +namespace cybozu { + +class ConditionVariableCs; + +namespace thread { + +#ifdef _WIN32 +typedef CRITICAL_SECTION CsHandle; +inline void CsInit(CsHandle& cs) { InitializeCriticalSection(&cs); } +inline void CsLock(CsHandle& cs) { EnterCriticalSection(&cs); } +inline void CsUnlock(CsHandle& cs) { LeaveCriticalSection(&cs); } +inline void CsTerm(CsHandle& cs) { DeleteCriticalSection(&cs); } +#else +typedef pthread_mutex_t CsHandle; +inline void CsInit(CsHandle& cs) { pthread_mutex_init(&cs, NULL); } +inline void CsLock(CsHandle& cs) { pthread_mutex_lock(&cs); } +inline void CsUnlock(CsHandle& cs) { pthread_mutex_unlock(&cs); } +inline void CsTerm(CsHandle& cs) { pthread_mutex_destroy(&cs); } +#endif + +} // cybozu::thread + +class CriticalSection { + friend class cybozu::ConditionVariableCs; +public: + CriticalSection() + { + thread::CsInit(hdl_); + } + ~CriticalSection() + { + thread::CsTerm(hdl_); + } + inline void lock() + { + thread::CsLock(hdl_); + } + inline void unlock() + { + thread::CsUnlock(hdl_); + } +private: + CriticalSection(const CriticalSection&); + CriticalSection& operator=(const CriticalSection&); + thread::CsHandle hdl_; +}; + +typedef cybozu::thread::AutoLockT<cybozu::CriticalSection> AutoLockCs; //!< auto lock critical section + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/crypto.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/crypto.hpp new file mode 100644 index 000000000..d427179d9 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/crypto.hpp @@ -0,0 +1,321 @@ +#pragma once +/** + @file + @brief wrap openssl + @author MITSUNARI Shigeo(@herumi) +*/ + +#include <cybozu/exception.hpp> +#ifdef __APPLE__ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif +#if 0 //#ifdef __APPLE__ + #define COMMON_DIGEST_FOR_OPENSSL + #include <CommonCrypto/CommonDigest.h> + #include <CommonCrypto/CommonHMAC.h> + #define SHA1 CC_SHA1 + #define SHA224 CC_SHA224 + #define SHA256 CC_SHA256 + #define SHA384 CC_SHA384 + #define SHA512 CC_SHA512 +#else +#include <openssl/hmac.h> +#include <openssl/evp.h> +#include <openssl/sha.h> +#endif +#ifdef _MSC_VER + #include <cybozu/link_libeay32.hpp> +#endif + +namespace cybozu { + +namespace crypto { + +class Hash { +public: + enum Name { + N_SHA1, + N_SHA224, + N_SHA256, + N_SHA384, + N_SHA512 + }; +private: + Name name_; + size_t hashSize_; + union { + SHA_CTX sha1; + SHA256_CTX sha256; + SHA512_CTX sha512; + } ctx_; +public: + static inline 
size_t getSize(Name name) + { + switch (name) { + case N_SHA1: return SHA_DIGEST_LENGTH; + case N_SHA224: return SHA224_DIGEST_LENGTH; + case N_SHA256: return SHA256_DIGEST_LENGTH; + case N_SHA384: return SHA384_DIGEST_LENGTH; + case N_SHA512: return SHA512_DIGEST_LENGTH; + default: + throw cybozu::Exception("crypto:Hash:getSize") << name; + } + } + static inline const char *getName(Name name) + { + switch (name) { + case N_SHA1: return "sha1"; + case N_SHA224: return "sha224"; + case N_SHA256: return "sha256"; + case N_SHA384: return "sha384"; + case N_SHA512: return "sha512"; + default: + throw cybozu::Exception("crypto:Hash:getName") << name; + } + } + static inline Name getName(const std::string& nameStr) + { + static const struct { + const char *nameStr; + Name name; + } tbl[] = { + { "sha1", N_SHA1 }, + { "sha224", N_SHA224 }, + { "sha256", N_SHA256 }, + { "sha384", N_SHA384 }, + { "sha512", N_SHA512 }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + if (nameStr == tbl[i].nameStr) return tbl[i].name; + } + throw cybozu::Exception("crypto:Hash:getName") << nameStr; + } + explicit Hash(Name name = N_SHA1) + : name_(name) + , hashSize_(getSize(name)) + { + reset(); + } + void update(const void *buf, size_t bufSize) + { + switch (name_) { + case N_SHA1: SHA1_Update(&ctx_.sha1, buf, bufSize); break; + case N_SHA224: SHA224_Update(&ctx_.sha256, buf, bufSize); break; + case N_SHA256: SHA256_Update(&ctx_.sha256, buf, bufSize); break; + case N_SHA384: SHA384_Update(&ctx_.sha512, buf, bufSize); break; + case N_SHA512: SHA512_Update(&ctx_.sha512, buf, bufSize); break; + } + } + void update(const std::string& buf) + { + update(buf.c_str(), buf.size()); + } + void reset() + { + switch (name_) { + case N_SHA1: SHA1_Init(&ctx_.sha1); break; + case N_SHA224: SHA224_Init(&ctx_.sha256); break; + case N_SHA256: SHA256_Init(&ctx_.sha256); break; + case N_SHA384: SHA384_Init(&ctx_.sha512); break; + case N_SHA512: SHA512_Init(&ctx_.sha512); break; + default: + throw cybozu::Exception("crypto:Hash:rset") << name_; + } + } + /* + md must have hashSize byte + @note clear inner buffer after calling digest + */ + void digest(void *out, const void *buf, size_t bufSize) + { + update(buf, bufSize); + unsigned char *md = reinterpret_cast<unsigned char*>(out); + switch (name_) { + case N_SHA1: SHA1_Final(md, &ctx_.sha1); break; + case N_SHA224: SHA224_Final(md, &ctx_.sha256); break; + case N_SHA256: SHA256_Final(md, &ctx_.sha256); break; + case N_SHA384: SHA384_Final(md, &ctx_.sha512); break; + case N_SHA512: SHA512_Final(md, &ctx_.sha512); break; + default: + throw cybozu::Exception("crypto:Hash:digest") << name_; + } + reset(); + } + std::string digest(const void *buf, size_t bufSize) + { + std::string ret; + ret.resize(hashSize_); + digest(&ret[0], buf, bufSize); + return ret; + } + std::string digest(const std::string& buf = "") + { + return digest(buf.c_str(), buf.size()); + } + /* + out must have necessary size + @note return written size + */ + static inline size_t digest(void *out, Name name, const void *buf, size_t bufSize) + { + unsigned char *md = (unsigned char*)out; + const unsigned char *src = cybozu::cast<const unsigned char *>(buf); + switch (name) { + case N_SHA1: SHA1(src, bufSize, md); return 160 / 8; + case N_SHA224: SHA224(src, bufSize, md); return 224 / 8; + case N_SHA256: SHA256(src, bufSize, md); return 256 / 8; + case N_SHA384: SHA384(src, bufSize, md); return 384 / 8; + case N_SHA512: SHA512(src, bufSize, md); return 512 / 8; + default: + return 0; + } + } + static inline 
std::string digest(Name name, const void *buf, size_t bufSize) + { + char md[128]; + size_t size = digest(md, name, buf, bufSize); + if (size == 0) throw cybozu::Exception("crypt:Hash:digest") << name; + return std::string(md, size); + } + static inline std::string digest(Name name, const std::string& buf) + { + return digest(name, buf.c_str(), buf.size()); + } +}; + +class Hmac { + const EVP_MD *evp_; +public: + explicit Hmac(Hash::Name name = Hash::N_SHA1) + { + switch (name) { + case Hash::N_SHA1: evp_ = EVP_sha1(); break; + case Hash::N_SHA224: evp_ = EVP_sha224(); break; + case Hash::N_SHA256: evp_ = EVP_sha256(); break; + case Hash::N_SHA384: evp_ = EVP_sha384(); break; + case Hash::N_SHA512: evp_ = EVP_sha512(); break; + default: + throw cybozu::Exception("crypto:Hmac:") << name; + } + } + std::string eval(const std::string& key, const std::string& data) + { + std::string out(EVP_MD_size(evp_) + 1, 0); + unsigned int outLen = 0; + if (HMAC(evp_, key.c_str(), static_cast<int>(key.size()), + cybozu::cast<const uint8_t *>(data.c_str()), data.size(), cybozu::cast<uint8_t *>(&out[0]), &outLen)) { + out.resize(outLen); + return out; + } + throw cybozu::Exception("crypto::Hamc::eval"); + } +}; + +class Cipher { + const EVP_CIPHER *cipher_; + EVP_CIPHER_CTX *ctx_; +public: + enum Name { + N_AES128_CBC, + N_AES192_CBC, + N_AES256_CBC, + N_AES128_ECB, // be carefull to use + N_AES192_ECB, // be carefull to use + N_AES256_ECB, // be carefull to use + }; + static inline size_t getSize(Name name) + { + switch (name) { + case N_AES128_CBC: return 128; + case N_AES192_CBC: return 192; + case N_AES256_CBC: return 256; + case N_AES128_ECB: return 128; + case N_AES192_ECB: return 192; + case N_AES256_ECB: return 256; + default: + throw cybozu::Exception("crypto:Cipher:getSize") << name; + } + } + enum Mode { + Decoding, + Encoding + }; + explicit Cipher(Name name = N_AES128_CBC) + : cipher_(0) + , ctx_(0) + { + ctx_ = EVP_CIPHER_CTX_new(); + if (ctx_ == 0) throw cybozu::Exception("crypto:Cipher:EVP_CIPHER_CTX_new"); + switch (name) { + case N_AES128_CBC: cipher_ = EVP_aes_128_cbc(); break; + case N_AES192_CBC: cipher_ = EVP_aes_192_cbc(); break; + case N_AES256_CBC: cipher_ = EVP_aes_256_cbc(); break; + case N_AES128_ECB: cipher_ = EVP_aes_128_ecb(); break; + case N_AES192_ECB: cipher_ = EVP_aes_192_ecb(); break; + case N_AES256_ECB: cipher_ = EVP_aes_256_ecb(); break; + default: + throw cybozu::Exception("crypto:Cipher:Cipher:name") << (int)name; + } + } + ~Cipher() + { + if (ctx_) EVP_CIPHER_CTX_free(ctx_); + } + /* + @note don't use padding = true + */ + void setup(Mode mode, const std::string& key, const std::string& iv, bool padding = false) + { + const int keyLen = static_cast<int>(key.size()); + const int expectedKeyLen = EVP_CIPHER_key_length(cipher_); + if (keyLen != expectedKeyLen) { + throw cybozu::Exception("crypto:Cipher:setup:keyLen") << keyLen << expectedKeyLen; + } + + int ret = EVP_CipherInit_ex(ctx_, cipher_, NULL, cybozu::cast<const uint8_t*>(key.c_str()), cybozu::cast<const uint8_t*>(iv.c_str()), mode == Encoding ? 1 : 0); + if (ret != 1) { + throw cybozu::Exception("crypto:Cipher:setup:EVP_CipherInit_ex") << ret; + } + ret = EVP_CIPHER_CTX_set_padding(ctx_, padding ? 
1 : 0); + if (ret != 1) { + throw cybozu::Exception("crypto:Cipher:setup:EVP_CIPHER_CTX_set_padding") << ret; + } +/* + const int ivLen = static_cast<int>(iv.size()); + const int expectedIvLen = EVP_CIPHER_CTX_iv_length(&ctx_); + if (ivLen != expectedIvLen) { + throw cybozu::Exception("crypto:Cipher:setup:ivLen") << ivLen << expectedIvLen; + } +*/ + } + /* + the size of outBuf must be larger than inBufSize + blockSize + @retval positive or 0 : writeSize(+blockSize) + @retval -1 : error + */ + int update(char *outBuf, const char *inBuf, int inBufSize) + { + int outLen = 0; + int ret = EVP_CipherUpdate(ctx_, cybozu::cast<uint8_t*>(outBuf), &outLen, cybozu::cast<const uint8_t*>(inBuf), inBufSize); + if (ret != 1) return -1; + return outLen; + } + /* + return -1 if padding + @note don't use + */ + int finalize(char *outBuf) + { + int outLen = 0; + int ret = EVP_CipherFinal_ex(ctx_, cybozu::cast<uint8_t*>(outBuf), &outLen); + if (ret != 1) return -1; + return outLen; + } +}; + +} } // cybozu::crypto + +#ifdef __APPLE__ + #pragma GCC diagnostic pop +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/endian.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/endian.hpp new file mode 100644 index 000000000..3f1575c46 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/endian.hpp @@ -0,0 +1,224 @@ +#pragma once + +/** + @file + @brief deal with big and little endian + + @author MITSUNARI Shigeo(@herumi) +*/ +#include <cybozu/inttype.hpp> +#include <string.h> +#include <stdlib.h> +#include <stdio.h> + +namespace cybozu { + +#ifdef _MSC_VER +inline uint16_t byteSwap(uint16_t x) { return _byteswap_ushort(x); } +inline uint32_t byteSwap(uint32_t x) { return _byteswap_ulong(x); } +inline uint64_t byteSwap(uint64_t x) { return _byteswap_uint64(x); } +#else +#if (((__GNUC__) << 16) + (__GNUC_MINOR__)) >= ((4 << 16) + 8) +inline uint16_t byteSwap(uint16_t x) { return __builtin_bswap16(x); } +#else +inline uint16_t byteSwap(uint16_t x) { return (x >> 8) | (x << 8); } +#endif +inline uint32_t byteSwap(uint32_t x) { return __builtin_bswap32(x); } +inline uint64_t byteSwap(uint64_t x) { return __builtin_bswap64(x); } +#endif + +/** + get 16bit integer as little endian + @param src [in] pointer +*/ +inline uint16_t Get16bitAsLE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint16_t x; + memcpy(&x, src, sizeof(x)); + return x; +#else + const uint8_t *p = static_cast<const uint8_t *>(src); + return p[0] | (p[1] << 8); +#endif +} + +/** + get 32bit integer as little endian + @param src [in] pointer +*/ +inline uint32_t Get32bitAsLE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint32_t x; + memcpy(&x, src, sizeof(x)); + return x; +#else + const uint8_t *p = static_cast<const uint8_t *>(src); + return Get16bitAsLE(p) | (static_cast<uint32_t>(Get16bitAsLE(p + 2)) << 16); +#endif +} + +/** + get 64bit integer as little endian + @param src [in] pointer +*/ +inline uint64_t Get64bitAsLE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint64_t x; + memcpy(&x, src, sizeof(x)); + return x; +#else + const uint8_t *p = static_cast<const uint8_t *>(src); + return Get32bitAsLE(p) | (static_cast<uint64_t>(Get32bitAsLE(p + 4)) << 32); +#endif +} + +/** + get 16bit integer as bit endian + @param src [in] pointer +*/ +inline uint16_t Get16bitAsBE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint16_t x; + memcpy(&x, src, sizeof(x)); + return byteSwap(x); +#else + const uint8_t *p = static_cast<const 
uint8_t *>(src); + return p[1] | (p[0] << 8); +#endif +} + +/** + get 32bit integer as bit endian + @param src [in] pointer +*/ +inline uint32_t Get32bitAsBE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint32_t x; + memcpy(&x, src, sizeof(x)); + return byteSwap(x); +#else + const uint8_t *p = static_cast<const uint8_t *>(src); + return Get16bitAsBE(p + 2) | (static_cast<uint32_t>(Get16bitAsBE(p)) << 16); +#endif +} + +/** + get 64bit integer as big endian + @param src [in] pointer +*/ +inline uint64_t Get64bitAsBE(const void *src) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + uint64_t x; + memcpy(&x, src, sizeof(x)); + return byteSwap(x); +#else + const uint8_t *p = static_cast<const uint8_t *>(src); + return Get32bitAsBE(p + 4) | (static_cast<uint64_t>(Get32bitAsBE(p)) << 32); +#endif +} + +/** + set 16bit integer as little endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set16bitAsLE(void *src, uint16_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast<uint8_t *>(src); + p[0] = static_cast<uint8_t>(x); + p[1] = static_cast<uint8_t>(x >> 8); +#endif +} +/** + set 32bit integer as little endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set32bitAsLE(void *src, uint32_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast<uint8_t *>(src); + p[0] = static_cast<uint8_t>(x); + p[1] = static_cast<uint8_t>(x >> 8); + p[2] = static_cast<uint8_t>(x >> 16); + p[3] = static_cast<uint8_t>(x >> 24); +#endif +} +/** + set 64bit integer as little endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set64bitAsLE(void *src, uint64_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast<uint8_t *>(src); + Set32bitAsLE(p, static_cast<uint32_t>(x)); + Set32bitAsLE(p + 4, static_cast<uint32_t>(x >> 32)); +#endif +} +/** + set 16bit integer as big endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set16bitAsBE(void *src, uint16_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + x = byteSwap(x); + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast<uint8_t *>(src); + p[0] = static_cast<uint8_t>(x >> 8); + p[1] = static_cast<uint8_t>(x); +#endif +} +/** + set 32bit integer as big endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set32bitAsBE(void *src, uint32_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + x = byteSwap(x); + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast<uint8_t *>(src); + p[0] = static_cast<uint8_t>(x >> 24); + p[1] = static_cast<uint8_t>(x >> 16); + p[2] = static_cast<uint8_t>(x >> 8); + p[3] = static_cast<uint8_t>(x); +#endif +} +/** + set 64bit integer as big endian + @param src [out] pointer + @param x [in] integer +*/ +inline void Set64bitAsBE(void *src, uint64_t x) +{ +#if CYBOZU_ENDIAN == CYBOZU_ENDIAN_LITTLE + x = byteSwap(x); + memcpy(src, &x, sizeof(x)); +#else + uint8_t *p = static_cast<uint8_t *>(src); + Set32bitAsBE(p, static_cast<uint32_t>(x >> 32)); + Set32bitAsBE(p + 4, static_cast<uint32_t>(x)); +#endif +} + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/exception.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/exception.hpp new file mode 100644 index 000000000..247ba4de0 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/exception.hpp @@ -0,0 +1,252 @@ 
+#pragma once +/** + @file + @brief definition of abstruct exception class + @author MITSUNARI Shigeo(@herumi) +*/ +#ifdef CYBOZU_MINIMUM_EXCEPTION + +#include <cybozu/inttype.hpp> + +namespace cybozu { + +namespace exception { +inline const char *makeString(const char *, size_t) +{ + return ""; +} + +} // cybozu::exception + +class Exception { +public: + explicit Exception(const char* = 0, bool = true) + { + } + ~Exception() CYBOZU_NOEXCEPT {} + const char *what() const CYBOZU_NOEXCEPT { return "cybozu:Exception"; } + template<class T> + Exception& operator<<(const T&) + { + return *this; + } +}; + +} // cybozu + +#else + +#include <string> +#include <algorithm> +#include <sstream> +#include <errno.h> +#include <stdio.h> +#ifdef _WIN32 + #include <winsock2.h> + #include <windows.h> +#else + #include <string.h> // for strerror_r +#endif +#include <cybozu/inttype.hpp> +#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE + #include <cybozu/stacktrace.hpp> +#endif + +namespace cybozu { + +const bool DontThrow = true; + +namespace exception { + +/* get max 16 characters to avoid buffer overrun */ +inline std::string makeString(const char *str, size_t size) +{ + return std::string(str, std::min<size_t>(size, 16)); +} + +#ifdef _WIN32 +inline std::string wstr2str(const std::wstring& wstr) +{ + std::string str; + for (size_t i = 0; i < wstr.size(); i++) { + uint16_t c = wstr[i]; + if (c < 0x80) { + str += char(c); + } else { + char buf[16]; + CYBOZU_SNPRINTF(buf, sizeof(buf), "\\u%04x", c); + str += buf; + } + } + return str; +} +#endif + +} // cybozu::exception + +/** + convert errno to string + @param err [in] errno + @note for both windows and linux +*/ +inline std::string ConvertErrorNoToString(int err) +{ + char errBuf[256]; + std::string ret; +#ifdef _WIN32 + if (strerror_s(errBuf, sizeof(errBuf), err) == 0) { + ret = errBuf; + } else { + ret = "err"; + } +#elif defined(_GNU_SOURCE) + ret = ::strerror_r(err, errBuf, sizeof(errBuf)); +#else + if (strerror_r(err, errBuf, sizeof(errBuf)) == 0) { + ret = errBuf; + } else { + ret = "err"; + } +#endif + char buf2[64]; + CYBOZU_SNPRINTF(buf2, sizeof(buf2), "(%d)", err); + ret += buf2; + return ret; +} + +class Exception : public std::exception { + mutable std::string str_; +#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE + mutable std::string stackTrace_; +#endif +public: + explicit Exception(const std::string& name = "", bool enableStackTrace = true) + : str_(name) + { +#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE + if (enableStackTrace) stackTrace_ = cybozu::StackTrace().toString(); +#else + cybozu::disable_warning_unused_variable(enableStackTrace); +#endif + } + ~Exception() CYBOZU_NOEXCEPT {} + const char *what() const CYBOZU_NOEXCEPT { return toString().c_str(); } + const std::string& toString() const CYBOZU_NOEXCEPT + { +#ifdef CYBOZU_EXCEPTION_WITH_STACKTRACE + try { + if (!stackTrace_.empty()) { +#ifdef CYBOZU_STACKTRACE_ONELINE + str_ += "\n<<<STACKTRACE>>> "; + str_ += stackTrace_; +#else + str_ += "\n<<<STACKTRACE\n"; + str_ += stackTrace_; + str_ += "\n>>>STACKTRACE"; +#endif + } + } catch (...) 
{ + } + stackTrace_.clear(); +#endif + return str_; + } + Exception& operator<<(const char *s) + { + str_ += ':'; + str_ += s; + return *this; + } + Exception& operator<<(const std::string& s) + { + return operator<<(s.c_str()); + } +#ifdef _WIN32 + Exception& operator<<(const std::wstring& s) + { + return operator<<(cybozu::exception::wstr2str(s)); + } +#endif + template<class T> + Exception& operator<<(const T& x) + { + std::ostringstream os; + os << x; + return operator<<(os.str()); + } +}; + +class ErrorNo { +public: +#ifdef _WIN32 + typedef unsigned int NativeErrorNo; +#else + typedef int NativeErrorNo; +#endif + explicit ErrorNo(NativeErrorNo err) + : err_(err) + { + } + ErrorNo() + : err_(getLatestNativeErrorNo()) + { + } + NativeErrorNo getLatestNativeErrorNo() const + { +#ifdef _WIN32 + return ::GetLastError(); +#else + return errno; +#endif + } + /** + convert NativeErrNo to string(maybe UTF8) + @param err [in] errno + @note Linux : same as ConvertErrorNoToString + Windows : for Win32 API(use en-us) + */ + std::string toString() const + { +#ifdef _WIN32 + const int msgSize = 256; + wchar_t msg[msgSize]; + int size = FormatMessageW( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + 0, + err_, + MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US), + msg, + msgSize, + NULL + ); + if (size <= 0) return ""; + // remove last "\r\n" + if (size > 2 && msg[size - 2] == '\r') { + msg[size - 2] = 0; + size -= 2; + } + std::string ret; + ret.resize(size); + // assume ascii only + for (int i = 0; i < size; i++) { + ret[i] = (char)msg[i]; + } + char buf2[64]; + CYBOZU_SNPRINTF(buf2, sizeof(buf2), "(%u)", err_); + ret += buf2; + return ret; +#else + return ConvertErrorNoToString(err_); +#endif + } +private: + NativeErrorNo err_; +}; + +inline std::ostream& operator<<(std::ostream& os, const cybozu::ErrorNo& self) +{ + return os << self.toString(); +} + +} // cybozu +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/hash.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/hash.hpp new file mode 100644 index 000000000..3fd246fa1 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/hash.hpp @@ -0,0 +1,67 @@ +#pragma once +#include <cybozu/inttype.hpp> + +namespace cybozu { + +template<class Iter> +uint32_t hash32(Iter begin, Iter end, uint32_t v = 0) +{ + if (v == 0) v = 2166136261U; + while (begin != end) { + v ^= *begin++; + v *= 16777619; + } + return v; +} +template<class Iter> +uint64_t hash64(Iter begin, Iter end, uint64_t v = 0) +{ + if (v == 0) v = 14695981039346656037ULL; + while (begin != end) { + v ^= *begin++; + v *= 1099511628211ULL; + } + v ^= v >> 32; + return v; +} +template<class T> +uint32_t hash32(const T *x, size_t n, uint32_t v = 0) +{ + return hash32(x, x + n, v); +} +template<class T> +uint64_t hash64(const T *x, size_t n, uint64_t v = 0) +{ + return hash64(x, x + n, v); +} + +} // cybozu + +namespace boost { + +template<class T> +struct hash; + +} // boost + +#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 +#include <functional> +#else + +namespace std { CYBOZU_NAMESPACE_TR1_BEGIN + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4099) // missmatch class and struct +#endif +#ifndef __APPLE__ +template<class T> +struct hash; +#endif +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +CYBOZU_NAMESPACE_TR1_END } // std + +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/inttype.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/inttype.hpp new file mode 100644 
index 000000000..62856bdb3 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/inttype.hpp @@ -0,0 +1,163 @@ +#pragma once +/** + @file + @brief int type definition and macros + @author MITSUNARI Shigeo(@herumi) +*/ + +#if defined(_MSC_VER) && (MSC_VER <= 1500) && !defined(CYBOZU_DEFINED_INTXX) + #define CYBOZU_DEFINED_INTXX + typedef __int64 int64_t; + typedef unsigned __int64 uint64_t; + typedef unsigned int uint32_t; + typedef int int32_t; + typedef unsigned short uint16_t; + typedef short int16_t; + typedef unsigned char uint8_t; + typedef signed char int8_t; +#else + #include <stdint.h> +#endif + +#ifdef _MSC_VER + #ifndef CYBOZU_DEFINED_SSIZE_T + #define CYBOZU_DEFINED_SSIZE_T + #ifdef _WIN64 + typedef int64_t ssize_t; + #else + typedef int32_t ssize_t; + #endif + #endif +#else + #include <unistd.h> // for ssize_t +#endif + +#ifndef CYBOZU_ALIGN + #ifdef _MSC_VER + #define CYBOZU_ALIGN(x) __declspec(align(x)) + #else + #define CYBOZU_ALIGN(x) __attribute__((aligned(x))) + #endif +#endif +#ifndef CYBOZU_FORCE_INLINE + #ifdef _MSC_VER + #define CYBOZU_FORCE_INLINE __forceinline + #else + #define CYBOZU_FORCE_INLINE __attribute__((always_inline)) + #endif +#endif +#ifndef CYBOZU_UNUSED + #ifdef __GNUC__ + #define CYBOZU_UNUSED __attribute__((unused)) + #else + #define CYBOZU_UNUSED + #endif +#endif +#ifndef CYBOZU_ALLOCA + #ifdef _MSC_VER + #include <malloc.h> + #define CYBOZU_ALLOCA(x) _malloca(x) + #else + #define CYBOZU_ALLOCA(x) __builtin_alloca(x) + #endif +#endif +#ifndef CYBOZU_NUM_OF_ARRAY + #define CYBOZU_NUM_OF_ARRAY(x) (sizeof(x) / sizeof(*x)) +#endif +#ifndef CYBOZU_SNPRINTF + #if defined(_MSC_VER) && (_MSC_VER < 1900) + #define CYBOZU_SNPRINTF(x, len, ...) (void)_snprintf_s(x, len, len - 1, __VA_ARGS__) + #else + #define CYBOZU_SNPRINTF(x, len, ...) 
(void)snprintf(x, len, __VA_ARGS__) + #endif +#endif + +#define CYBOZU_CPP_VERSION_CPP03 0 +#define CYBOZU_CPP_VERSION_TR1 1 +#define CYBOZU_CPP_VERSION_CPP11 2 +#define CYBOZU_CPP_VERSION_CPP14 3 +#define CYBOZU_CPP_VERSION_CPP17 4 + +#ifdef __GNUC__ + #define CYBOZU_GNUC_PREREQ(major, minor) ((__GNUC__) * 100 + (__GNUC_MINOR__) >= (major) * 100 + (minor)) +#else + #define CYBOZU_GNUC_PREREQ(major, minor) 0 +#endif + +#if (__cplusplus >= 201703) + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP17 +#elif (__cplusplus >= 201402) + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP14 +#elif (__cplusplus >= 201103) || (_MSC_VER >= 1500) || defined(__GXX_EXPERIMENTAL_CXX0X__) + #if defined(_MSC_VER) && (_MSC_VER <= 1600) + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_TR1 + #else + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP11 + #endif +#elif CYBOZU_GNUC_PREREQ(4, 5) || (CYBOZU_GNUC_PREREQ(4, 2) && __GLIBCXX__ >= 20070719) || defined(__INTEL_COMPILER) || (__clang_major__ >= 3) + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_TR1 +#else + #define CYBOZU_CPP_VERSION CYBOZU_CPP_VERSION_CPP03 +#endif + +#ifdef CYBOZU_USE_BOOST + #define CYBOZU_NAMESPACE_STD boost + #define CYBOZU_NAMESPACE_TR1_BEGIN + #define CYBOZU_NAMESPACE_TR1_END +#elif (CYBOZU_CPP_VERSION == CYBOZU_CPP_VERSION_TR1) && !defined(__APPLE__) + #define CYBOZU_NAMESPACE_STD std::tr1 + #define CYBOZU_NAMESPACE_TR1_BEGIN namespace tr1 { + #define CYBOZU_NAMESPACE_TR1_END } +#else + #define CYBOZU_NAMESPACE_STD std + #define CYBOZU_NAMESPACE_TR1_BEGIN + #define CYBOZU_NAMESPACE_TR1_END +#endif + +#ifndef CYBOZU_OS_BIT + #if defined(_WIN64) || defined(__x86_64__) || defined(__AARCH64EL__) || defined(__EMSCRIPTEN__) + #define CYBOZU_OS_BIT 64 + #else + #define CYBOZU_OS_BIT 32 + #endif +#endif + +#ifndef CYBOZU_HOST + #define CYBOZU_HOST_UNKNOWN 0 + #define CYBOZU_HOST_INTEL 1 + #define CYBOZU_HOST_ARM 2 + #if defined(_M_IX86) || defined(_M_AMD64) || defined(__x86_64__) || defined(__i386__) + #define CYBOZU_HOST CYBOZU_HOST_INTEL + #elif defined(__arm__) || defined(__AARCH64EL__) + #define CYBOZU_HOST CYBOZU_HOST_ARM + #else + #define CYBOZU_HOST CYBOZU_HOST_UNKNOWN + #endif +#endif + +#ifndef CYBOZU_ENDIAN + #define CYBOZU_ENDIAN_UNKNOWN 0 + #define CYBOZU_ENDIAN_LITTLE 1 + #define CYBOZU_ENDIAN_BIG 2 + #if (CYBOZU_HOST == CYBOZU_HOST_INTEL) + #define CYBOZU_ENDIAN CYBOZU_ENDIAN_LITTLE + #elif (CYBOZU_HOST == CYBOZU_HOST_ARM) && (defined(__ARM_EABI__) || defined(__AARCH64EL__)) + #define CYBOZU_ENDIAN CYBOZU_ENDIAN_LITTLE + #else + #define CYBOZU_ENDIAN CYBOZU_ENDIAN_UNKNOWN + #endif +#endif + +#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 + #define CYBOZU_NOEXCEPT noexcept +#else + #define CYBOZU_NOEXCEPT throw() +#endif +namespace cybozu { +template<class T> +void disable_warning_unused_variable(const T&) { } +template<class T, class S> +T cast(const S* ptr) { return static_cast<T>(static_cast<const void*>(ptr)); } +template<class T, class S> +T cast(S* ptr) { return static_cast<T>(static_cast<void*>(ptr)); } +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/itoa.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/itoa.hpp new file mode 100644 index 000000000..072e5b8b4 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/itoa.hpp @@ -0,0 +1,337 @@ +#pragma once +/** + @file + @brief convert integer to string(ascii) + + @author MITSUNARI Shigeo(@herumi) +*/ +#include <limits.h> +#ifndef CYBOZU_DONT_USE_STRING +#include <string> +#endif +#include <memory.h> 
+#include <cybozu/inttype.hpp> +#include <cybozu/bit_operation.hpp> + +namespace cybozu { + +template<class T> +size_t getHexLength(T x) +{ + return x == 0 ? 1 : cybozu::bsr(x) / 4 + 1; +} + +template<class T> +size_t getBinLength(T x) +{ + return x == 0 ? 1 : cybozu::bsr(x) + 1; +} +/* + convert x to hex string with len + @note out should have getHexLength(x) size + out is not NUL terminated +*/ +template<class T> +void itohex(char *out, size_t len, T x, bool upCase = true) +{ + static const char *hexTbl[] = { + "0123456789abcdef", + "0123456789ABCDEF" + }; + const char *tbl = hexTbl[upCase]; + for (size_t i = 0; i < len; i++) { + out[len - i - 1] = tbl[x % 16]; + x /= 16; + } +} +/* + convert x to bin string with len + @note out should have getBinLength(x) size + out is not NUL terminated +*/ +template<class T> +void itobin(char *out, size_t len, T x) +{ + for (size_t i = 0; i < len; i++) { + out[len - i - 1] = '0' + (x & 1); + x >>= 1; + } +} + +namespace itoa_local { + +/* + convert x to dec + use buf[0, bufSize) + return 0 if false + return writtenSize which is not terminated + @REMARK the top of string is buf + bufSize - writtenSize +*/ +template<class UT> +size_t uintToDec(char *buf, size_t bufSize, UT x) +{ + for (size_t i = 0; i < bufSize; i++) { + buf[bufSize - 1 - i] = '0' + static_cast<int>(x % 10); + x /= 10; + if (x == 0) return i + 1; + } + return 0; +} + +/* + convert x to hex + use buf[0, bufSize) + return 0 if false + return writtenSize which is not terminated + @REMARK the top of string is buf + bufSize - writtenSize +*/ +template<class UT> +size_t uintToHex(char *buf, size_t bufSize, UT x, bool upCase = true) +{ + static const char *hexTbl[] = { + "0123456789abcdef", + "0123456789ABCDEF" + }; + const char *tbl = hexTbl[upCase]; + for (size_t i = 0; i < bufSize; i++) { + buf[bufSize - 1 - i] = tbl[x % 16]; + x /= 16; + if (x == 0) return i + 1; + } + return 0; +} + +/* + convert x to bin + use buf[0, bufSize) + return 0 if false + return writtenSize which is not terminated + @REMARK the top of string is buf + bufSize - writtenSize +*/ +template<class UT> +size_t uintToBin(char *buf, size_t bufSize, UT x) +{ + for (size_t i = 0; i < bufSize; i++) { + buf[bufSize - 1 - i] = '0' + (x & 1); + x >>= 1; + if (x == 0) return i + 1; + } + return 0; +} + +template<class T> +size_t intToDec(char *buf, size_t bufSize, T x) +{ + if (x == LLONG_MIN) { + const char minStr[] = "-9223372036854775808"; + const size_t minStrLen = sizeof(minStr) - 1; + if (bufSize < minStrLen) { + return 0; + } else { + memcpy(buf + bufSize - minStrLen, minStr, minStrLen); + return minStrLen; + } + } + bool negative = x < 0; + uint64_t absX = negative ? -x : x; + size_t n = uintToDec(buf, bufSize, absX); + if (n == 0) return 0; + if (negative) { + if (bufSize == n) return 0; + n++; + buf[bufSize - n] = '-'; + } + return n; +} + +#ifndef CYBOZU_DONT_USE_STRING +template<typename T> +void convertFromUint(std::string& out, T x) +{ + char buf[40]; + size_t n = uintToDec(buf, sizeof(buf), x); + assert(n > 0); + out.assign(buf + sizeof(buf) - n, n); +} + +inline void convertFromInt(std::string& out, long long x) +{ + char buf[40]; + size_t n = intToDec(buf, sizeof(buf), x); + assert(n > 0); + out.assign(buf + sizeof(buf) - n, n); +} + +template<typename T> +void itohexLocal(std::string& out, T x, bool upCase, bool withZero) +{ + const size_t size = withZero ? 
sizeof(T) * 2 : getHexLength(x); + out.resize(size); + itohex(&out[0], size, x, upCase); +} + +template<class T> +void itobinLocal(std::string& out, T x, bool withZero) +{ + const size_t size = withZero ? sizeof(T) * 8 : getBinLength(x); + out.resize(size); + itobin(&out[0], size, x); +} +#endif + +} // itoa_local + +#ifndef CYBOZU_DONT_USE_STRING +/** + convert int to string + @param out [out] string + @param x [in] int +*/ +inline void itoa(std::string& out, int x) +{ + itoa_local::convertFromInt(out, x); +} + +/** + convert long long to string + @param out [out] string + @param x [in] long long +*/ +inline void itoa(std::string& out, long long x) +{ + itoa_local::convertFromInt(out, x); +} + +/** + convert unsigned int to string + @param out [out] string + @param x [in] unsigned int +*/ +inline void itoa(std::string& out, unsigned int x) +{ + itoa_local::convertFromUint(out, x); +} + +/** + convert unsigned long long to string + @param out [out] string + @param x [in] unsigned long long +*/ +inline void itoa(std::string& out, unsigned long long x) +{ + itoa_local::convertFromUint(out, x); +} + +#if defined(__SIZEOF_LONG__) && (__SIZEOF_LONG__ == 8) +inline void itoa(std::string& out, long x) { itoa(out, static_cast<long long>(x)); } +inline void itoa(std::string& out, unsigned long x) { itoa(out, static_cast<unsigned long long>(x)); } +#else +inline void itoa(std::string& out, long x) { itoa(out, static_cast<int>(x)); } +inline void itoa(std::string& out, unsigned long x) { itoa(out, static_cast<int>(x)); } +#endif +/** + convert integer to string + @param x [in] int +*/ +template<typename T> +inline std::string itoa(T x) +{ + std::string ret; + itoa(ret, x); + return ret; +} + +inline void itohex(std::string& out, unsigned char x, bool upCase = true, bool withZero = true) +{ + itoa_local::itohexLocal(out, x, upCase, withZero); +} + +inline void itohex(std::string& out, unsigned short x, bool upCase = true, bool withZero = true) +{ + itoa_local::itohexLocal(out, x, upCase, withZero); +} + +inline void itohex(std::string& out, unsigned int x, bool upCase = true, bool withZero = true) +{ + itoa_local::itohexLocal(out, x, upCase, withZero); +} + +inline void itohex(std::string& out, unsigned long x, bool upCase = true, bool withZero = true) +{ + itoa_local::itohexLocal(out, x, upCase, withZero); +} + +inline void itohex(std::string& out, unsigned long long x, bool upCase = true, bool withZero = true) +{ + itoa_local::itohexLocal(out, x, upCase, withZero); +} + +template<typename T> +inline std::string itobin(T x, bool withZero = true) +{ + std::string out; + itoa_local::itobinLocal(out, x, withZero); + return out; +} + +inline void itobin(std::string& out, unsigned char x, bool withZero = true) +{ + itoa_local::itobinLocal(out, x, withZero); +} + +inline void itobin(std::string& out, unsigned short x, bool withZero = true) +{ + itoa_local::itobinLocal(out, x, withZero); +} + +inline void itobin(std::string& out, unsigned int x, bool withZero = true) +{ + itoa_local::itobinLocal(out, x, withZero); +} + +inline void itobin(std::string& out, unsigned long x, bool withZero = true) +{ + itoa_local::itobinLocal(out, x, withZero); +} + +inline void itobin(std::string& out, unsigned long long x, bool withZero = true) +{ + itoa_local::itobinLocal(out, x, withZero); +} + +template<typename T> +inline std::string itohex(T x, bool upCase = true, bool withZero = true) +{ + std::string out; + itohex(out, x, upCase, withZero); + return out; +} +/** + convert integer to string with zero padding + @param x 
[in] int + @param len [in] minimum lengh of string + @param c [in] padding character + @note + itoa(12, 4) == "0012" + itoa(1234, 4) == "1234" + itoa(12345, 4) == "12345" + itoa(-12, 4) == "-012" +*/ +template<typename T> +inline std::string itoaWithZero(T x, size_t len, char c = '0') +{ + std::string ret; + itoa(ret, x); + if (ret.size() < len) { + std::string zero(len - ret.size(), c); + if (x >= 0) { + ret = zero + ret; + } else { + ret = "-" + zero + ret.substr(1); + } + } + return ret; +} +#endif + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_libeay32.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_libeay32.hpp new file mode 100644 index 000000000..d83f1b6ea --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_libeay32.hpp @@ -0,0 +1,21 @@ +#pragma once +/** + @file + @brief link libeay32.lib of openssl + @author MITSUNARI Shigeo(@herumi) +*/ +#if defined(_WIN32) && defined(_MT) + #if _MSC_VER >= 1900 // VC2015 + #ifdef _WIN64 + #pragma comment(lib, "mt/14/libeay32.lib") + #else + #pragma comment(lib, "mt/14/32/libeay32.lib") + #endif +// #elif _MSC_VER == 1800 // VC2013 + #else + #pragma comment(lib, "mt/12/libeay32.lib") + #endif + #pragma comment(lib, "advapi32.lib") + #pragma comment(lib, "gdi32.lib") + #pragma comment(lib, "user32.lib") +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_mpir.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_mpir.hpp new file mode 100644 index 000000000..d20d7b1a9 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_mpir.hpp @@ -0,0 +1,18 @@ +#pragma once +/** + @file + @brief link mpir/mpirxx of mpir + @author MITSUNARI Shigeo(@herumi) +*/ +#if defined(_WIN32) && defined(_MT) + #if _MSC_VER >= 1900 // VC2015, VC2017(1910) + #pragma comment(lib, "mt/14/mpir.lib") + #pragma comment(lib, "mt/14/mpirxx.lib") + #elif _MSC_VER == 1800 // VC2013 + #pragma comment(lib, "mt/12/mpir.lib") + #pragma comment(lib, "mt/12/mpirxx.lib") + #elif _MSC_VER == 1700 // VC2012 + #pragma comment(lib, "mt/11/mpir.lib") + #pragma comment(lib, "mt/11/mpirxx.lib") + #endif +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_ssleay32.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_ssleay32.hpp new file mode 100644 index 000000000..60c2361ae --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/link_ssleay32.hpp @@ -0,0 +1,19 @@ +#pragma once +/** + @file + @brief link ssleay32.lib of openssl + @author MITSUNARI Shigeo(@herumi) +*/ +#if defined(_WIN32) && defined(_MT) + #if _MSC_VER >= 1900 // VC2015 + #ifdef _WIN64 + #pragma comment(lib, "mt/14/ssleay32.lib") + #else + #pragma comment(lib, "mt/14/32/ssleay32.lib") + #endif +// #elif _MSC_VER == 1800 // VC2013 + #else + #pragma comment(lib, "mt/12/ssleay32.lib") + #endif + #pragma comment(lib, "user32.lib") +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/mutex.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/mutex.hpp new file mode 100644 index 000000000..acde6bcbf --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/mutex.hpp @@ -0,0 +1,141 @@ +#pragma once +/** + @file + @brief mutex + + @author MITSUNARI Shigeo(@herumi) + @author MITSUNARI Shigeo +*/ + +#ifdef _WIN32 + #include <windows.h> +#else + #include <pthread.h> + #include <time.h> +#endif +#include <assert.h> +#include <stdlib.h> + +namespace cybozu { + +class ConditionVariable; + +namespace 
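A minimal usage sketch for the itoa/itohex/itobin/itoaWithZero helpers declared in cybozu/itoa.hpp above; it is not part of the vendored sources, and the expected strings follow the zero-padding and case rules stated in the header comments.

#include <cybozu/itoa.hpp>
#include <cstdio>

int main()
{
	std::string d = cybozu::itoa(1234);                // "1234"
	std::string h = cybozu::itohex(0x12abu);           // "000012AB" (withZero pads to sizeof(T)*2, upCase by default)
	std::string b = cybozu::itobin((unsigned char)5);  // "00000101" (withZero pads to sizeof(T)*8)
	std::string z = cybozu::itoaWithZero(-12, 4);      // "-012" (see the note above)
	printf("%s %s %s %s\n", d.c_str(), h.c_str(), b.c_str(), z.c_str());
	return 0;
}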
thread { + +#ifdef _WIN32 + typedef HANDLE MutexHandle; + inline void MutexInit(MutexHandle& mutex) + { +// mutex = CreateSemaphore(NULL /* no security */, 1 /* init */, 0x7FFFFFFF /* max */, NULL /* no name */); + mutex = CreateMutex(NULL /* no security */, FALSE /* no owner */, NULL /* no name */); + } + inline void MutexLock(MutexHandle& mutex) { WaitForSingleObject(mutex, INFINITE); } + /* + return false if timeout + @param msec [in] msec + */ + inline bool MutexLockTimeout(MutexHandle& mutex, int msec) + { + DWORD ret = WaitForSingleObject(mutex, msec); + if (ret == WAIT_OBJECT_0) { + return true; + } + if (ret == WAIT_TIMEOUT) { + return false; + } + /* ret == WAIT_ABANDONED */ + assert(0); + return false; + } + inline void MutexUnlock(MutexHandle& mutex) + { +// ReleaseSemaphore(mutex, 1, NULL); + ReleaseMutex(mutex); + } + inline void MutexTerm(MutexHandle& mutex) { CloseHandle(mutex); } +#else + typedef pthread_mutex_t MutexHandle; + inline void MutexInit(MutexHandle& mutex) + { +#if 1 + pthread_mutex_init(&mutex, NULL); +#else + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_TIMED_NP)) { + perror("pthread_mutexattr_settype"); + exit(1); + } + pthread_mutex_init(&mutex, &attr); + pthread_mutexattr_destroy(&attr); +#endif + } + inline void MutexLock(MutexHandle& mutex) { pthread_mutex_lock(&mutex); } +#if 0 + inline bool MutexLockTimeout(MutexHandle& mutex, int msec) + { + timespec absTime; + clock_gettime(CLOCK_REALTIME, &absTime); + absTime.tv_sec += msec / 1000; + absTime.tv_nsec += msec % 1000; + bool ret = pthread_mutex_timedlock(&mutex, &absTime) == 0; + return ret; + } +#endif + inline void MutexUnlock(MutexHandle& mutex) { pthread_mutex_unlock(&mutex); } + inline void MutexTerm(MutexHandle& mutex) { pthread_mutex_destroy(&mutex); } +#endif + +template<class T> +class AutoLockT { +public: + explicit AutoLockT(T &t) + : t_(t) + { + t_.lock(); + } + ~AutoLockT() + { + t_.unlock(); + } +private: + T& t_; + AutoLockT& operator=(const AutoLockT&); +}; + +} // cybozu::thread + +class Mutex { + friend class cybozu::ConditionVariable; +public: + Mutex() + { + thread::MutexInit(hdl_); + } + ~Mutex() + { + thread::MutexTerm(hdl_); + } + void lock() + { + thread::MutexLock(hdl_); + } +#if 0 + bool lockTimeout(int msec) + { + return thread::MutexLockTimeout(hdl_, msec); + } +#endif + void unlock() + { + thread::MutexUnlock(hdl_); + } +private: + Mutex(const Mutex&); + Mutex& operator=(const Mutex&); + thread::MutexHandle hdl_; +}; + +typedef cybozu::thread::AutoLockT<cybozu::Mutex> AutoLock; + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/option.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/option.hpp new file mode 100644 index 000000000..a5dfd137d --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/option.hpp @@ -0,0 +1,723 @@ +#pragma once +/** + @file + @brief command line parser + + @author MITSUNARI Shigeo(@herumi) +*/ +#include <string> +#include <vector> +#include <map> +#include <sstream> +#include <iostream> +#include <limits> +#include <stdio.h> +#include <stdlib.h> +#include <assert.h> +#include <cybozu/exception.hpp> +#include <cybozu/atoi.hpp> + +/* + Option parser + + progName (opt1-name|opt2-name|...) param1 param2 ... + param1:param1-help + param2:param2-help + -op1-name:opt1-help + ... 
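A short sketch of how Mutex and AutoLock from cybozu/mutex.hpp might be used; the guarded counter is illustrative only.

#include <cybozu/mutex.hpp>

struct Counter {
	cybozu::Mutex m;
	int n;
	Counter() : n(0) {}
	void inc()
	{
		cybozu::AutoLock lk(m); // locks in the constructor, unlocks in the destructor
		n++;
	}
};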
+ + How to setup + int num; + -n num ; (optional) option => appendOpt(&x, <defaultValue>, "num", "num-help"); + -n num ; must option => appendMust(&x, "num", "num-help"); + + std::vector<int> v; + -v s1 s2 s3 ... => appendVec(&v, "v"); + + Remark1: terminate parsing of v if argv begins with '-[^0-9]' + Remark2: the begining character of opt-name is not a number ('0'...'9') + because avoid conflict with minus number + + std::string file1; + file1 is param => appendParam(&file1, "input-file"); + file2 is optional param => appendParamOpt(&file2, "output-file"); + + How to use + opt.parse(argc, argv); + + see sample/option_smpl.cpp +*/ + +namespace cybozu { + +struct OptionError : public cybozu::Exception { + enum Type { + NoError = 0, + BAD_OPT = 1, + BAD_VALUE, + NO_VALUE, + OPT_IS_NECESSARY, + PARAM_IS_NECESSARY, + REDUNDANT_VAL, + BAD_ARGC + }; + Type type; + int argPos; + OptionError() + : cybozu::Exception("OptionError", false) + , type(NoError) + , argPos(0) + { + } + cybozu::Exception& set(Type _type, int _argPos = 0) + { + this->type = _type; + this->argPos = _argPos; + switch (_type) { + case BAD_OPT: + (*this) << "bad opt"; + break; + case BAD_VALUE: + (*this) << "bad value"; + break; + case NO_VALUE: + (*this) << "no value"; + break; + case OPT_IS_NECESSARY: + (*this) << "opt is necessary"; + break; + case PARAM_IS_NECESSARY: + (*this) << "param is necessary"; + break; + case REDUNDANT_VAL: + (*this) << "redundant argVal"; + break; + case BAD_ARGC: + (*this) << "bad argc"; + default: + break; + } + return *this; + } +}; + +namespace option_local { + +template<class T> +bool convert(T* x, const char *str) +{ + std::istringstream is(str); + is >> *x; + return !!is; +} + +template<> +inline bool convert(std::string* x, const char *str) +{ + *x = str; + return true; +} + +template<class T> +bool convertInt(T* x, const char *str) +{ + if (str[0] == '0' && str[1] == 'x') { + bool b; + *x = cybozu::hextoi(&b, str + 2); + return b; + } + size_t len = strlen(str); + int factor = 1; + if (len > 1) { + switch (str[len - 1]) { + case 'k': factor = 1000; len--; break; + case 'm': factor = 1000 * 1000; len--; break; + case 'g': factor = 1000 * 1000 * 1000; len--; break; + case 'K': factor = 1024; len--; break; + case 'M': factor = 1024 * 1024; len--; break; + case 'G': factor = 1024 * 1024 * 1024; len--; break; + default: break; + } + } + bool b; + T y = cybozu::atoi(&b, str, len); + if (!b) return false; + if (factor > 1) { + if ((std::numeric_limits<T>::min)() / factor <= y + && y <= (std::numeric_limits<T>::max)() / factor) { + *x = y * factor; + } else { + return false; + } + } else { + *x = y; + } + return true; +} + +#define CYBOZU_OPTION_DEFINE_CONVERT_INT(type) \ +template<>inline bool convert(type* x, const char *str) { return convertInt(x, str); } + +CYBOZU_OPTION_DEFINE_CONVERT_INT(int) +CYBOZU_OPTION_DEFINE_CONVERT_INT(long) +CYBOZU_OPTION_DEFINE_CONVERT_INT(long long) + +CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned int) +CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned long) +CYBOZU_OPTION_DEFINE_CONVERT_INT(unsigned long long) + +#undef CYBOZU_OPTION_DEFINE_CONVERT_INT + +struct HolderBase { + virtual ~HolderBase(){} + virtual bool set(const char*) = 0; + virtual HolderBase *clone() const = 0; + virtual std::string toStr() const = 0; + virtual const void *get() const = 0; +}; + +template<class T> +struct Holder : public HolderBase { + T *p_; + Holder(T *p) : p_(p) {} + HolderBase *clone() const { return new Holder(p_); } + bool set(const char *str) { return option_local::convert(p_, str); } 
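A few worked values for convertInt's hex-prefix and size-suffix handling above; a sketch only, and option_local is an internal namespace reached here purely for illustration.

int n = 0;
cybozu::option_local::convertInt(&n, "0x10"); // n == 16   (hex prefix)
cybozu::option_local::convertInt(&n, "3k");   // n == 3000 (k = 1000)
cybozu::option_local::convertInt(&n, "4K");   // n == 4096 (K = 1024)
cybozu::option_local::convertInt(&n, "2M");   // n == 2097152; returns false if the scaled value would overflow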
+ std::string toStr() const + { + std::ostringstream os; + os << *p_; + return os.str(); + } + const void *get() const { return (void*)p_; } +}; + +/* + for gcc 7 with -fnew-ttp-matching + this specialization is not necessary under -fno-new-ttp-matching +*/ +template struct Holder<std::string>; + +template<class T, class Alloc, template<class T_, class Alloc_>class Container> +struct Holder<Container<T, Alloc> > : public HolderBase { + typedef Container<T, Alloc> Vec; + Vec *p_; + Holder(Vec *p) : p_(p) {} + HolderBase *clone() const { return new Holder<Vec>(p_); } + bool set(const char *str) + { + T t; + bool b = option_local::convert(&t, str); + if (b) p_->push_back(t); + return b; + } + std::string toStr() const + { + std::ostringstream os; + bool isFirst = true; + for (typename Vec::const_iterator i = p_->begin(), ie = p_->end(); i != ie; ++i) { + if (isFirst) { + isFirst = false; + } else { + os << ' '; + } + os << *i; + } + return os.str(); + } + const void *get() const { return (void*)p_; } +}; + +class Var { + HolderBase *p_; + bool isSet_; +public: + Var() : p_(0), isSet_(false) { } + Var(const Var& rhs) : p_(rhs.p_->clone()), isSet_(false) { } + template<class T> + explicit Var(T *x) : p_(new Holder<T>(x)), isSet_(false) { } + + ~Var() { delete p_; } + + void swap(Var& rhs) CYBOZU_NOEXCEPT + { + std::swap(p_, rhs.p_); + std::swap(isSet_, rhs.isSet_); + } + void operator=(const Var& rhs) + { + Var v(rhs); + swap(v); + } + bool set(const char *str) + { + isSet_ = true; + return p_->set(str); + } + std::string toStr() const { return p_ ? p_->toStr() : ""; } + bool isSet() const { return isSet_; } + const void *get() const { return p_ ? p_->get() : 0; } +}; + +} // option_local + +class Option { + enum Mode { // for opt + N_is0 = 0, // for bool by appendBoolOpt() + N_is1 = 1, + N_any = 2 + }; + enum ParamMode { + P_exact = 0, // one + P_optional = 1, // zero or one + P_variable = 2 // zero or greater + }; + struct Info { + option_local::Var var; + Mode mode; // 0 or 1 or any ; for opt, not used for Param + bool isMust; // this option is must + std::string opt; // option param name without '-' + std::string help; // description of option + + Info() : mode(N_is0), isMust(false) {} + template<class T> + Info(T* pvar, Mode mode, bool isMust, const char *opt, const std::string& help) + : var(pvar) + , mode(mode) + , isMust(isMust) + , opt(opt) + , help(help) + { + } + friend inline std::ostream& operator<<(std::ostream& os, const Info& self) + { + os << self.opt << '=' << self.var.toStr(); + if (self.var.isSet()) { + os << " (set)"; + } else { + os << " (default)"; + } + return os; + } + void put() const + { + std::cout << *this; + } + void usage() const + { + printf(" -%s %s%s\n", opt.c_str(), help.c_str(), isMust ? " (must)" : ""); + } + void shortUsage() const + { + printf(" -%s %s", opt.c_str(), mode == N_is0 ? "" : mode == N_is1 ? 
"para" : "para..."); + } + bool isSet() const { return var.isSet(); } + const void *get() const { return var.get(); } + }; + typedef std::vector<Info> InfoVec; + typedef std::vector<std::string> StrVec; + typedef std::map<std::string, size_t> OptMap; + InfoVec infoVec_; + InfoVec paramVec_; + Info remains_; + OptMap optMap_; + bool showOptUsage_; + ParamMode paramMode_; + std::string progName_; + std::string desc_; + std::string helpOpt_; + std::string help_; + std::string usage_; + StrVec delimiters_; + StrVec *remainsAfterDelimiter_; + int nextDelimiter_; + template<class T> + void appendSub(T *pvar, Mode mode, bool isMust, const char *opt, const std::string& help) + { + const char c = opt[0]; + if ('0' <= c && c <= '9') throw cybozu::Exception("Option::appendSub:opt must begin with not number") << opt; + if (optMap_.find(opt) != optMap_.end()) { + throw cybozu::Exception("Option::append:duplicate option") << opt; + } + optMap_[opt] = infoVec_.size(); + infoVec_.push_back(Info(pvar, mode, isMust, opt, help)); + } + + template<class T, class U> + void append(T *pvar, const U& defaultVal, bool isMust, const char *opt, const std::string& help = "") + { + *pvar = defaultVal; + appendSub(pvar, N_is1, isMust, opt, help); + } + /* + don't deal with negative number as option + */ + bool isOpt(const char *str) const + { + if (str[0] != '-') return false; + const char c = str[1]; + if ('0' <= c && c <= '9') return false; + return true; + } + void verifyParamMode() + { + if (paramMode_ != P_exact) throw cybozu::Exception("Option:appendParamVec:appendParam is forbidden after appendParamOpt/appendParamVec"); + } + std::string getBaseName(const std::string& name) const + { + size_t pos = name.find_last_of("/\\"); + if (pos == std::string::npos) return name; + return name.substr(pos + 1); + } + bool inDelimiters(const std::string& str) const + { + return std::find(delimiters_.begin(), delimiters_.end(), str) != delimiters_.end(); + } +public: + Option() + : showOptUsage_(true) + , paramMode_(P_exact) + , remainsAfterDelimiter_(0) + , nextDelimiter_(-1) + { + } + virtual ~Option() {} + /* + append optional option with default value + @param pvar [in] pointer to option variable + @param defaultVal [in] default value + @param opt [in] option name + @param help [in] option help + @note you can use 123k, 56M if T is int/long/long long + k : *1000 + m : *1000000 + g : *1000000000 + K : *1024 + M : *1024*1024 + G : *1024*1024*1024 + */ + template<class T, class U> + void appendOpt(T *pvar, const U& defaultVal, const char *opt, const std::string& help = "") + { + append(pvar, defaultVal, false, opt, help); + } + /* + default value of *pvar is false + */ + void appendBoolOpt(bool *pvar, const char *opt, const std::string& help = "") + { + *pvar = false; + appendSub(pvar, N_is0, false, opt, help); + } + /* + append necessary option + @param pvar [in] pointer to option variable + @param opt [in] option name + @param help [in] option help + */ + template<class T> + void appendMust(T *pvar, const char *opt, const std::string& help = "") + { + append(pvar, T(), true, opt, help); + } + /* + append vector option + @param pvar [in] pointer to option variable + @param opt [in] option name + @param help [in] option help + */ + template<class T, class Alloc, template<class T_, class Alloc_>class Container> + void appendVec(Container<T, Alloc> *pvar, const char *opt, const std::string& help = "") + { + appendSub(pvar, N_any, false, opt, help); + } + /* + append parameter + @param pvar [in] pointer to parameter + @param opt 
[in] option name + @param help [in] option help + */ + template<class T> + void appendParam(T *pvar, const char *opt, const std::string& help = "") + { + verifyParamMode(); + paramVec_.push_back(Info(pvar, N_is1, true, opt, help)); + } + /* + append optional parameter + @param pvar [in] pointer to parameter + @param defaultVal [in] default value + @param opt [in] option name + @param help [in] option help + @note you can call appendParamOpt once after appendParam + */ + template<class T, class U> + void appendParamOpt(T *pvar, const U& defaultVal, const char *opt, const std::string& help = "") + { + verifyParamMode(); + *pvar = defaultVal; + paramMode_ = P_optional; + paramVec_.push_back(Info(pvar, N_is1, false, opt, help)); + } + /* + append remain parameter + @param pvar [in] pointer to vector of parameter + @param opt [in] option name + @param help [in] option help + @note you can call appendParamVec once after appendParam + */ + template<class T, class Alloc, template<class T_, class Alloc_>class Container> + void appendParamVec(Container<T, Alloc> *pvar, const char *name, const std::string& help = "") + { + verifyParamMode(); + paramMode_ = P_variable; + remains_.var = option_local::Var(pvar); + remains_.mode = N_any; + remains_.isMust = false; + remains_.opt = name; + remains_.help = help; + } + void appendHelp(const char *opt, const std::string& help = ": show this message") + { + helpOpt_ = opt; + help_ = help; + } + /* + stop parsing after delimiter is found + @param delimiter [in] string to stop + @param remain [out] set remaining strings if remain + */ + void setDelimiter(const std::string& delimiter, std::vector<std::string> *remain = 0) + { + delimiters_.push_back(delimiter); + remainsAfterDelimiter_ = remain; + } + /* + stop parsing after delimiter is found + @param delimiter [in] string to stop to append list of delimiters + */ + void appendDelimiter(const std::string& delimiter) + { + delimiters_.push_back(delimiter); + } + /* + clear list of delimiters + */ + void clearDelimiterList() { delimiters_.clear(); } + /* + return the next position of delimiter between [0, argc] + @note return argc if delimiter is not set nor found + */ + int getNextPositionOfDelimiter() const { return nextDelimiter_; } + /* + parse (argc, argv) + @param argc [in] argc of main + @param argv [in] argv of main + @param startPos [in] start position of argc + @param progName [in] used instead of argv[0] + */ + bool parse(int argc, const char *const argv[], int startPos = 1, const char *progName = 0) + { + if (argc < 1 || startPos > argc) return false; + progName_ = getBaseName(progName ? 
progName : argv[startPos - 1]); + nextDelimiter_ = argc; + OptionError err; + for (int pos = startPos; pos < argc; pos++) { + if (inDelimiters(argv[pos])) { + nextDelimiter_ = pos + 1; + if (remainsAfterDelimiter_) { + for (int i = nextDelimiter_; i < argc; i++) { + remainsAfterDelimiter_->push_back(argv[i]); + } + } + break; + } + if (isOpt(argv[pos])) { + const std::string str = argv[pos] + 1; + if (helpOpt_ == str) { + usage(); + exit(0); + } + OptMap::const_iterator i = optMap_.find(str); + if (i == optMap_.end()) { + err.set(OptionError::BAD_OPT, pos); + goto ERR; + } + + Info& info = infoVec_[i->second]; + switch (info.mode) { + case N_is0: + if (!info.var.set("1")) { + err.set(OptionError::BAD_VALUE, pos); + goto ERR; + } + break; + case N_is1: + pos++; + if (pos == argc) { + err.set(OptionError::BAD_VALUE, pos) << (std::string("no value for -") + info.opt); + goto ERR; + } + if (!info.var.set(argv[pos])) { + err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for -" + info.opt); + goto ERR; + } + break; + case N_any: + default: + { + pos++; + int j = 0; + while (pos < argc && !isOpt(argv[pos])) { + if (!info.var.set(argv[pos])) { + err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for -" + info.opt) << j; + goto ERR; + } + pos++; + j++; + } + if (j > 0) { + pos--; + } else { + err.set(OptionError::NO_VALUE, pos) << (std::string("for -") + info.opt); + goto ERR; + } + } + break; + } + } else { + bool used = false; + for (size_t i = 0; i < paramVec_.size(); i++) { + Info& param = paramVec_[i]; + if (!param.var.isSet()) { + if (!param.var.set(argv[pos])) { + err.set(OptionError::BAD_VALUE, pos) << (std::string(argv[pos]) + " for " + param.opt); + goto ERR; + } + used = true; + break; + } + } + if (!used) { + if (paramMode_ == P_variable) { + remains_.var.set(argv[pos]); + } else { + err.set(OptionError::REDUNDANT_VAL, pos) << argv[pos]; + goto ERR; + } + } + } + } + // check whether must-opt is set + for (size_t i = 0; i < infoVec_.size(); i++) { + const Info& info = infoVec_[i]; + if (info.isMust && !info.var.isSet()) { + err.set(OptionError::OPT_IS_NECESSARY) << info.opt; + goto ERR; + } + } + // check whether param is set + for (size_t i = 0; i < paramVec_.size(); i++) { + const Info& param = paramVec_[i]; + if (param.isMust && !param.var.isSet()) { + err.set(OptionError::PARAM_IS_NECESSARY) << param.opt; + goto ERR; + } + } + // check whether remains is set + if (paramMode_ == P_variable && remains_.isMust && !remains_.var.isSet()) { + err.set(OptionError::PARAM_IS_NECESSARY) << remains_.opt; + goto ERR; + } + return true; + ERR: + assert(err.type); + printf("%s\n", err.what()); + return false; + } + /* + show desc at first in usage() + */ + void setDescription(const std::string& desc) + { + desc_ = desc; + } + /* + show command line after desc + don't put option message if not showOptUsage + */ + void setUsage(const std::string& usage, bool showOptUsage = false) + { + usage_ = usage; + showOptUsage_ = showOptUsage; + } + void usage() const + { + if (!desc_.empty()) printf("%s\n", desc_.c_str()); + if (usage_.empty()) { + printf("usage:%s", progName_.c_str()); + if (!infoVec_.empty()) printf(" [opt]"); + for (size_t i = 0; i < infoVec_.size(); i++) { + if (infoVec_[i].isMust) infoVec_[i].shortUsage(); + } + for (size_t i = 0; i < paramVec_.size(); i++) { + printf(" %s", paramVec_[i].opt.c_str()); + } + if (paramMode_ == P_variable) { + printf(" %s", remains_.opt.c_str()); + } + printf("\n"); + } else { + printf("%s\n", usage_.c_str()); + if 
(!showOptUsage_) return; + } + for (size_t i = 0; i < paramVec_.size(); i++) { + const Info& param = paramVec_[i]; + if (!param.help.empty()) printf(" %s %s\n", paramVec_[i].opt.c_str(), paramVec_[i].help.c_str()); + } + if (!remains_.help.empty()) printf(" %s %s\n", remains_.opt.c_str(), remains_.help.c_str()); + if (!helpOpt_.empty()) { + printf(" -%s %s\n", helpOpt_.c_str(), help_.c_str()); + } + for (size_t i = 0; i < infoVec_.size(); i++) { + infoVec_[i].usage(); + } + } + friend inline std::ostream& operator<<(std::ostream& os, const Option& self) + { + for (size_t i = 0; i < self.paramVec_.size(); i++) { + const Info& param = self.paramVec_[i]; + os << param.opt << '=' << param.var.toStr() << std::endl; + } + if (self.paramMode_ == P_variable) { + os << "remains=" << self.remains_.var.toStr() << std::endl; + } + for (size_t i = 0; i < self.infoVec_.size(); i++) { + os << self.infoVec_[i] << std::endl; + } + return os; + } + void put() const + { + std::cout << *this; + } + /* + whether pvar is set or not + */ + template<class T> + bool isSet(const T* pvar) const + { + const void *p = static_cast<const void*>(pvar); + for (size_t i = 0; i < paramVec_.size(); i++) { + const Info& v = paramVec_[i]; + if (v.get() == p) return v.isSet(); + } + if (remains_.get() == p) return remains_.isSet(); + for (size_t i = 0; i < infoVec_.size(); i++) { + const Info& v = infoVec_[i]; + if (v.get() == p) return v.isSet(); + } + throw cybozu::Exception("Option:isSet:no assigned var") << pvar; + } +}; + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/random_generator.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/random_generator.hpp new file mode 100644 index 000000000..23096989d --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/random_generator.hpp @@ -0,0 +1,139 @@ +#pragma once +/** + @file + @brief pseudrandom generator + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ + +#include <cybozu/exception.hpp> +#ifdef _WIN32 +#include <winsock2.h> +#include <windows.h> +#include <wincrypt.h> +#ifdef _MSC_VER +#pragma comment (lib, "advapi32.lib") +#endif +#include <cybozu/critical_section.hpp> +#else +#include <sys/types.h> +#include <fcntl.h> +#endif + +namespace cybozu { + +class RandomGenerator { + RandomGenerator(const RandomGenerator&); + void operator=(const RandomGenerator&); +public: + uint32_t operator()() + { + return get32(); + } + uint32_t get32() + { + uint32_t ret; + read(&ret, 1); + return ret; + } + uint64_t get64() + { + uint64_t ret; + read(&ret, 1); + return ret; + } +#ifdef _WIN32 + RandomGenerator() + : prov_(0) + , pos_(bufSize) + { + DWORD flagTbl[] = { 0, CRYPT_NEWKEYSET }; + for (int i = 0; i < 2; i++) { + if (CryptAcquireContext(&prov_, NULL, NULL, PROV_RSA_FULL, flagTbl[i]) != 0) return; + } + throw cybozu::Exception("randomgenerator"); + } + void read_inner(void *buf, size_t byteSize) + { + if (CryptGenRandom(prov_, static_cast<DWORD>(byteSize), static_cast<BYTE*>(buf)) == 0) { + throw cybozu::Exception("randomgenerator:read") << byteSize; + } + } + ~RandomGenerator() + { + if (prov_) { + CryptReleaseContext(prov_, 0); + } + } + /* + fill buf[0..bufNum-1] with random data + @note bufNum is not byte size + */ + template<class T> + void read(T *buf, size_t bufNum) + { + cybozu::AutoLockCs al(cs_); + const size_t byteSize = sizeof(T) * bufNum; + if (byteSize > bufSize) { + read_inner(buf, byteSize); + } else { + if (pos_ + byteSize > bufSize) 
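Following the "How to setup / How to use" comment at the top of cybozu/option.hpp, a minimal command-line sketch; the option and parameter names below are invented for illustration (the upstream sample is sample/option_smpl.cpp).

#include <cybozu/option.hpp>

int main(int argc, char *argv[])
{
	cybozu::Option opt;
	int num;
	bool verbose;
	std::string inFile;
	opt.appendOpt(&num, 10, "n", ": number of items (accepts 4K, 2M, ...)");
	opt.appendBoolOpt(&verbose, "x", ": verbose output");
	opt.appendParam(&inFile, "input-file", ": input file name");
	opt.appendHelp("h");
	if (!opt.parse(argc, argv)) {
		opt.usage();
		return 1;
	}
	opt.put(); // e.g. prints "n=10 (default)" and the parsed parameters
	return 0;
}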
{ + read_inner(buf_, bufSize); + pos_ = 0; + } + memcpy(buf, buf_ + pos_, byteSize); + pos_ += byteSize; + } + } +private: + HCRYPTPROV prov_; + static const size_t bufSize = 1024; + char buf_[bufSize]; + size_t pos_; + cybozu::CriticalSection cs_; +#else + RandomGenerator() + : fp_(::fopen("/dev/urandom", "rb")) + { + if (!fp_) throw cybozu::Exception("randomgenerator"); + } + ~RandomGenerator() + { + if (fp_) ::fclose(fp_); + } + /* + fill buf[0..bufNum-1] with random data + @note bufNum is not byte size + */ + template<class T> + void read(T *buf, size_t bufNum) + { + const size_t byteSize = sizeof(T) * bufNum; + if (::fread(buf, 1, (int)byteSize, fp_) != byteSize) { + throw cybozu::Exception("randomgenerator:read") << byteSize; + } + } +#endif +private: + FILE *fp_; +}; + +template<class T, class RG> +void shuffle(T* v, size_t n, RG& rg) +{ + if (n <= 1) return; + for (size_t i = 0; i < n - 1; i++) { + size_t r = i + size_t(rg.get64() % (n - i)); + using namespace std; + swap(v[i], v[r]); + } +} + +template<class V, class RG> +void shuffle(V& v, RG& rg) +{ + shuffle(v.data(), v.size(), rg); +} + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/serializer.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/serializer.hpp new file mode 100644 index 000000000..1e23c8f42 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/serializer.hpp @@ -0,0 +1,363 @@ +#pragma once +/** + @file + @brief serializer for vector, list, map and so on + + @author MITSUNARI Shigeo(@herumi) +*/ +#include <assert.h> +#include <cybozu/stream.hpp> + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4127) +#endif + +//#define CYBOZU_SERIALIZER_FIXED_SIZE_INTEGER + +namespace cybozu { + +namespace serializer_local { + +template<class T> +union ci { + T i; + uint8_t c[sizeof(T)]; +}; + +template<class S, void (S::*)(size_t)> +struct HasMemFunc { }; + +template<class T> +void dispatch_reserve(T& t, size_t size, int, HasMemFunc<T, &T::reserve>* = 0) +{ + t.reserve(size); +} + +template<class T> +void dispatch_reserve(T&, size_t, int*) +{ +} + +template<class T> +void reserve_if_exists(T& t, size_t size) +{ + dispatch_reserve(t, size, 0); +} + +} // serializer_local + +template<class InputStream, class T> +void loadRange(T *p, size_t num, InputStream& is) +{ + cybozu::read(p, num * sizeof(T), is); +} + +template<class OutputStream, class T> +void saveRange(OutputStream& os, const T *p, size_t num) +{ + cybozu::write(os, p, num * sizeof(T)); +} + +template<class InputStream, class T> +void loadPod(T& x, InputStream& is) +{ + serializer_local::ci<T> ci; + loadRange(ci.c, sizeof(ci.c), is); + x = ci.i; +} + +template<class OutputStream, class T> +void savePod(OutputStream& os, const T& x) +{ + serializer_local::ci<T> ci; + ci.i = x; + saveRange(os, ci.c, sizeof(ci.c)); +} + +template<class InputStream, class T> +void load(T& x, InputStream& is) +{ + x.load(is); +} + +template<class OutputStream, class T> +void save(OutputStream& os, const T& x) +{ + x.save(os); +} + +#define CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(type) \ +template<class InputStream>void load(type& x, InputStream& is) { loadPod(x, is); } \ +template<class OutputStream>void save(OutputStream& os, type x) { savePod(os, x); } + +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(bool) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(char) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(short) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(unsigned char) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(unsigned short) 
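A usage sketch for RandomGenerator and the shuffle helpers above; the generator reads /dev/urandom on non-Windows builds and uses CryptGenRandom on Windows.

#include <cybozu/random_generator.hpp>
#include <cstdio>
#include <vector>

int main()
{
	cybozu::RandomGenerator rg;
	printf("r32=%u r64=%llu\n", (unsigned)rg.get32(), (unsigned long long)rg.get64());
	std::vector<int> v;
	for (int i = 0; i < 10; i++) v.push_back(i);
	cybozu::shuffle(v, rg); // Fisher-Yates style shuffle driven by rg.get64()
	for (size_t i = 0; i < v.size(); i++) printf("%d ", v[i]);
	printf("\n");
	return 0;
}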
+CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(wchar_t) + +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(float) +CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(double) + +#ifdef CYBOZU_SERIALIZER_FIXED_SIZE_INTEGER + +#define CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(type) CYBOZU_SERIALIZER_MAKE_SERIALIZER_F(type) + +#else + +namespace serializer_local { + +template<class S, class T> +bool isRecoverable(T x) +{ + return T(S(x)) == x; +} +/* + data structure H:D of integer x + H:header(1byte) + 0x80 ; D = 1 byte zero ext + 0x81 ; D = 2 byte zero ext + 0x82 ; D = 4 byte zero ext + 0x83 ; D = 8 byte zero ext + 0x84 ; D = 1 byte signed ext + 0x85 ; D = 2 byte signed ext + 0x86 ; D = 4 byte signed ext + 0x87 ; D = 8 byte signed ext + other; x = signed H, D = none +*/ +template<class OutputStream, class T> +void saveVariableInt(OutputStream& os, const T& x) +{ + if (isRecoverable<int8_t>(x)) { + uint8_t u8 = uint8_t(x); + if (unsigned(u8 - 0x80) <= 7) { + savePod(os, uint8_t(0x84)); + } + savePod(os, u8); + } else if (isRecoverable<uint8_t>(x)) { + savePod(os, uint8_t(0x80)); + savePod(os, uint8_t(x)); + } else if (isRecoverable<uint16_t>(x) || isRecoverable<int16_t>(x)) { + savePod(os, uint8_t(isRecoverable<uint16_t>(x) ? 0x81 : 0x85)); + savePod(os, uint16_t(x)); + } else if (isRecoverable<uint32_t>(x) || isRecoverable<int32_t>(x)) { + savePod(os, uint8_t(isRecoverable<uint32_t>(x) ? 0x82 : 0x86)); + savePod(os, uint32_t(x)); + } else { + assert(sizeof(T) == 8); + savePod(os, uint8_t(0x83)); + savePod(os, uint64_t(x)); + } +} + +template<class InputStream, class T> +void loadVariableInt(T& x, InputStream& is) +{ + uint8_t h; + loadPod(h, is); + if (h == 0x80) { + uint8_t v; + loadPod(v, is); + x = v; + } else if (h == 0x81) { + uint16_t v; + loadPod(v, is); + x = v; + } else if (h == 0x82) { + uint32_t v; + loadPod(v, is); + x = v; + } else if (h == 0x83) { + if (sizeof(T) == 4) throw cybozu::Exception("loadVariableInt:bad header") << h; + uint64_t v; + loadPod(v, is); + x = static_cast<T>(v); + } else if (h == 0x84) { + int8_t v; + loadPod(v, is); + x = v; + } else if (h == 0x85) { + int16_t v; + loadPod(v, is); + x = v; + } else if (h == 0x86) { + int32_t v; + loadPod(v, is); + x = v; + } else if (h == 0x87) { + if (sizeof(T) == 4) throw cybozu::Exception("loadVariableInt:bad header") << h; + int64_t v; + loadPod(v, is); + x = static_cast<T>(v); + } else { + x = static_cast<int8_t>(h); + } +} + +} // serializer_local + +#define CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(type) \ +template<class InputStream>void load(type& x, InputStream& is) { serializer_local::loadVariableInt(x, is); } \ +template<class OutputStream>void save(OutputStream& os, type x) { serializer_local::saveVariableInt(os, x); } + +#endif + +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(int) +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(long) +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(long long) +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned int) +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned long) +CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER(unsigned long long) + +#undef CYBOZU_SERIALIZER_MAKE_INT_SERIALIZER +#undef CYBOZU_SERIALIZER_MAKE_UNT_SERIALIZER +#undef CYBOZU_SERIALIZER_MAKE_SERIALIZER_F +#undef CYBOZU_SERIALIZER_MAKE_SERIALIZER_V + +// only for std::vector<POD> +template<class V, class InputStream> +void loadPodVec(V& v, InputStream& is) +{ + size_t size; + load(size, is); + v.resize(size); + if (size > 0) loadRange(&v[0], size, is); +} + +// only for std::vector<POD> +template<class V, class OutputStream> +void savePodVec(OutputStream& os, const V& v) +{ + save(os, 
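A small check of the variable-length integer layout documented above; the byte values assume a little-endian host, since savePod writes the raw in-memory representation.

#include <cybozu/serializer.hpp>
#include <cassert>

int main()
{
	std::string s;
	cybozu::StringOutputStream os(s);
	cybozu::save(os, 300); // int
	assert(s.size() == 3);
	assert((unsigned char)s[0] == 0x81); // header: 2-byte zero extension
	assert((unsigned char)s[1] == 0x2c); // 300 == 0x012C, low byte first on a little-endian host
	assert((unsigned char)s[2] == 0x01);
	// other small values: 5 -> 0x05, -3 -> 0xFD, 130 -> 0x80 0x82, -126 -> 0x84 0x82
	return 0;
}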
v.size()); + if (!v.empty()) saveRange(os, &v[0], v.size()); +} + +template<class InputStream> +void load(std::string& str, InputStream& is) +{ + loadPodVec(str, is); +} + +template<class OutputStream> +void save(OutputStream& os, const std::string& str) +{ + savePodVec(os, str); +} + +template<class OutputStream> +void save(OutputStream& os, const char *x) +{ + const size_t len = strlen(x); + save(os, len); + if (len > 0) saveRange(os, x, len); +} + + +// for vector, list +template<class InputStream, class T, class Alloc, template<class T_, class Alloc_>class Container> +void load(Container<T, Alloc>& x, InputStream& is) +{ + size_t size; + load(size, is); + serializer_local::reserve_if_exists(x, size); + for (size_t i = 0; i < size; i++) { + x.push_back(T()); + T& t = x.back(); + load(t, is); + } +} + +template<class OutputStream, class T, class Alloc, template<class T_, class Alloc_>class Container> +void save(OutputStream& os, const Container<T, Alloc>& x) +{ + typedef Container<T, Alloc> V; + save(os, x.size()); + for (typename V::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { + save(os, *i); + } +} + +// for set +template<class InputStream, class K, class Pred, class Alloc, template<class K_, class Pred_, class Alloc_>class Container> +void load(Container<K, Pred, Alloc>& x, InputStream& is) +{ + size_t size; + load(size, is); + for (size_t i = 0; i < size; i++) { + K t; + load(t, is); + x.insert(t); + } +} + +template<class OutputStream, class K, class Pred, class Alloc, template<class K_, class Pred_, class Alloc_>class Container> +void save(OutputStream& os, const Container<K, Pred, Alloc>& x) +{ + typedef Container<K, Pred, Alloc> Set; + save(os, x.size()); + for (typename Set::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { + save(os, *i); + } +} + +// for map +template<class InputStream, class K, class V, class Pred, class Alloc, template<class K_, class V_, class Pred_, class Alloc_>class Container> +void load(Container<K, V, Pred, Alloc>& x, InputStream& is) +{ + typedef Container<K, V, Pred, Alloc> Map; + size_t size; + load(size, is); + for (size_t i = 0; i < size; i++) { + std::pair<typename Map::key_type, typename Map::mapped_type> vt; + load(vt.first, is); + load(vt.second, is); + x.insert(vt); + } +} + +template<class OutputStream, class K, class V, class Pred, class Alloc, template<class K_, class V_, class Pred_, class Alloc_>class Container> +void save(OutputStream& os, const Container<K, V, Pred, Alloc>& x) +{ + typedef Container<K, V, Pred, Alloc> Map; + save(os, x.size()); + for (typename Map::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { + save(os, i->first); + save(os, i->second); + } +} + +// unordered_map +template<class InputStream, class K, class V, class Hash, class Pred, class Alloc, template<class K_, class V_, class Hash_, class Pred_, class Alloc_>class Container> +void load(Container<K, V, Hash, Pred, Alloc>& x, InputStream& is) +{ + typedef Container<K, V, Hash, Pred, Alloc> Map; + size_t size; + load(size, is); +// x.reserve(size); // tr1::unordered_map may not have reserve + cybozu::serializer_local::reserve_if_exists(x, size); + for (size_t i = 0; i < size; i++) { + std::pair<typename Map::key_type, typename Map::mapped_type> vt; + load(vt.first, is); + load(vt.second, is); + x.insert(vt); + } +} + +template<class OutputStream, class K, class V, class Hash, class Pred, class Alloc, template<class K_, class V_, class Hash_, class Pred_, class Alloc_>class Container> +void save(OutputStream& os, const 
Container<K, V, Hash, Pred, Alloc>& x) +{ + typedef Container<K, V, Hash, Pred, Alloc> Map; + save(os, x.size()); + for (typename Map::const_iterator i = x.begin(), end = x.end(); i != end; ++i) { + save(os, i->first); + save(os, i->second); + } +} + +} // cybozu + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/sha2.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/sha2.hpp new file mode 100644 index 000000000..97193849b --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/sha2.hpp @@ -0,0 +1,438 @@ +#pragma once +/** + @file + @brief SHA-256, SHA-512 class + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <cybozu/endian.hpp> +#ifndef CYBOZU_DONT_USE_STRING +#include <cybozu/itoa.hpp> +#include <string> +#endif +#include <memory.h> +#include <assert.h> + +namespace cybozu { + +namespace sha2_local { + +template<class T> +T min_(T x, T y) { return x < y ? x : y;; } + +#ifndef CYBOZU_DONT_USE_STRING +inline void uint32toHexStr(char *buf, const uint32_t *x, size_t n) +{ + for (size_t i = 0; i < n; i++) { + cybozu::itohex(buf + i * 8, 8, x[i], false); + } +} + +inline void uint64toHexStr(char *buf, const uint64_t *x, size_t n) +{ + for (size_t i = 0; i < n; i++) { + cybozu::itohex(buf + i * 16, 16, x[i], false); + } +} +#endif + +inline uint32_t rot32(uint32_t x, int s) +{ +#ifdef _MSC_VER + return _rotr(x, s); +#else + return (x >> s) | (x << (32 - s)); +#endif +} + +inline uint64_t rot64(uint64_t x, int s) +{ +#ifdef _MSC_VER + return _rotr64(x, s); +#else + return (x >> s) | (x << (64 - s)); +#endif +} + +} // cybozu::sha2_local + +class Sha256 { +private: + static const size_t blockSize_ = 64; + static const size_t hSize_ = 8; + uint64_t totalSize_; + size_t roundBufSize_; + char roundBuf_[blockSize_]; + uint32_t h_[hSize_]; + static const size_t outByteSize_ = hSize_ * sizeof(uint32_t); + const uint32_t *k_; + + /** + @param buf [in] buffer(64byte) + */ + void round(const char *buf) + { + using namespace sha2_local; + uint32_t w[64]; + for (int i = 0; i < 16; i++) { + w[i] = cybozu::Get32bitAsBE(&buf[i * 4]); + } + for (int i = 16 ; i < 64; i++) { + uint32_t t = w[i - 15]; + uint32_t s0 = rot32(t, 7) ^ rot32(t, 18) ^ (t >> 3); + t = w[i - 2]; + uint32_t s1 = rot32(t, 17) ^ rot32(t, 19) ^ (t >> 10); + w[i] = w[i - 16] + s0 + w[i - 7] + s1; + } + uint32_t a = h_[0]; + uint32_t b = h_[1]; + uint32_t c = h_[2]; + uint32_t d = h_[3]; + uint32_t e = h_[4]; + uint32_t f = h_[5]; + uint32_t g = h_[6]; + uint32_t h = h_[7]; + for (int i = 0; i < 64; i++) { + uint32_t s1 = rot32(e, 6) ^ rot32(e, 11) ^ rot32(e, 25); + uint32_t ch = g ^ (e & (f ^ g)); + uint32_t t1 = h + s1 + ch + k_[i] + w[i]; + uint32_t s0 = rot32(a, 2) ^ rot32(a, 13) ^ rot32(a, 22); + uint32_t maj = ((a | b) & c) | (a & b); + uint32_t t2 = s0 + maj; + h = g; + g = f; + f = e; + e = d + t1; + d = c; + c = b; + b = a; + a = t1 + t2; + } + h_[0] += a; + h_[1] += b; + h_[2] += c; + h_[3] += d; + h_[4] += e; + h_[5] += f; + h_[6] += g; + h_[7] += h; + totalSize_ += 64; + } + /* + final phase + */ + void term(const char *buf, size_t bufSize) + { + assert(bufSize < blockSize_); + const uint64_t totalSize = totalSize_ + bufSize; + + uint8_t last[blockSize_]; + memcpy(last, buf, bufSize); + memset(&last[bufSize], 0, blockSize_ - bufSize); + last[bufSize] = uint8_t(0x80); /* top bit = 1 */ + if (bufSize >= blockSize_ - 8) { + round(reinterpret_cast<const 
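A round-trip sketch for the container save/load templates above, using the string streams from cybozu/stream.hpp; the data is illustrative.

#include <cybozu/serializer.hpp>
#include <cassert>
#include <map>
#include <vector>

int main()
{
	std::map<std::string, std::vector<int> > m;
	m["a"].push_back(1);
	m["a"].push_back(2);
	m["b"].push_back(3);

	std::string buf;
	cybozu::StringOutputStream os(buf);
	cybozu::save(os, m); // size, then (key, value) pairs

	std::map<std::string, std::vector<int> > m2;
	cybozu::StringInputStream is(buf);
	cybozu::load(m2, is);
	assert(m == m2);
	return 0;
}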
char*>(last)); + memset(last, 0, sizeof(last)); // clear stack + } + cybozu::Set32bitAsBE(&last[56], uint32_t(totalSize >> 29)); + cybozu::Set32bitAsBE(&last[60], uint32_t(totalSize * 8)); + round(reinterpret_cast<const char*>(last)); + } +public: + Sha256() + { + clear(); + } + Sha256(const void *buf, size_t bufSize) + { + clear(); + digest(buf, bufSize); + } + void clear() + { + static const uint32_t kTbl[] = { + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 + }; + k_ = kTbl; + totalSize_ = 0; + roundBufSize_ = 0; + h_[0] = 0x6a09e667; + h_[1] = 0xbb67ae85; + h_[2] = 0x3c6ef372; + h_[3] = 0xa54ff53a; + h_[4] = 0x510e527f; + h_[5] = 0x9b05688c; + h_[6] = 0x1f83d9ab; + h_[7] = 0x5be0cd19; + } + void update(const void *buf_, size_t bufSize) + { + const char *buf = reinterpret_cast<const char*>(buf_); + if (bufSize == 0) return; + if (roundBufSize_ > 0) { + size_t size = sha2_local::min_(blockSize_ - roundBufSize_, bufSize); + memcpy(roundBuf_ + roundBufSize_, buf, size); + roundBufSize_ += size; + buf += size; + bufSize -= size; + } + if (roundBufSize_ == blockSize_) { + round(roundBuf_); + roundBufSize_ = 0; + } + while (bufSize >= blockSize_) { + assert(roundBufSize_ == 0); + round(buf); + buf += blockSize_; + bufSize -= blockSize_; + } + if (bufSize > 0) { + assert(bufSize < blockSize_); + assert(roundBufSize_ == 0); + memcpy(roundBuf_, buf, bufSize); + roundBufSize_ = bufSize; + } + assert(roundBufSize_ < blockSize_); + } + void digest(const void *buf, size_t bufSize) + { + update(buf, bufSize); + term(roundBuf_, roundBufSize_); + } + size_t get(void *out) const + { + char *p = reinterpret_cast<char*>(out); + for (size_t i = 0; i < hSize_; i++) { + cybozu::Set32bitAsBE(&p[i * sizeof(h_[0])], h_[i]); + } + return outByteSize_; + } +#ifndef CYBOZU_DONT_USE_STRING + void update(const std::string& buf) + { + update(buf.c_str(), buf.size()); + } + void digest(const std::string& str = "") + { + digest(str.c_str(), str.size()); + } + std::string get() const + { + char out[outByteSize_]; + get(out); + return std::string(out, sizeof(out)); + } + std::string toHexStr() const + { + char buf[outByteSize_ * 2]; + sha2_local::uint32toHexStr(buf, h_, hSize_); + return std::string(buf, sizeof(buf)); + } +#endif +}; + +class Sha512 { +private: + static const size_t blockSize_ = 128; + static const size_t hSize_ = 8; + uint64_t totalSize_; + size_t roundBufSize_; + char roundBuf_[blockSize_]; + uint64_t h_[hSize_]; + static const size_t outByteSize_ = hSize_ * sizeof(uint64_t); + const uint64_t *k_; + + template<size_t i0, size_t i1, size_t i2, size_t i3, size_t i4, size_t i5, size_t i6, size_t i7> + void round1(uint64_t *S, const uint64_t *w, size_t i) + { + using namespace sha2_local; + uint64_t& a = S[i0]; + uint64_t& b = S[i1]; + uint64_t& c = S[i2]; + uint64_t& d = S[i3]; + 
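A minimal check of the Sha256 class above against the standard FIPS 180 test vector for "abc"; toHexStr() returns the 32-byte digest as lower-case hex.

#include <cybozu/sha2.hpp>
#include <cassert>

int main()
{
	cybozu::Sha256 h;
	h.digest("abc");
	assert(h.toHexStr() ==
		"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad");
	return 0;
}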
uint64_t& e = S[i4]; + uint64_t& f = S[i5]; + uint64_t& g = S[i6]; + uint64_t& h = S[i7]; + + uint64_t s1 = rot64(e, 14) ^ rot64(e, 18) ^ rot64(e, 41); + uint64_t ch = g ^ (e & (f ^ g)); + uint64_t t0 = h + s1 + ch + k_[i] + w[i]; + uint64_t s0 = rot64(a, 28) ^ rot64(a, 34) ^ rot64(a, 39); + uint64_t maj = ((a | b) & c) | (a & b); + uint64_t t1 = s0 + maj; + d += t0; + h = t0 + t1; + } + /** + @param buf [in] buffer(64byte) + */ + void round(const char *buf) + { + using namespace sha2_local; + uint64_t w[80]; + for (int i = 0; i < 16; i++) { + w[i] = cybozu::Get64bitAsBE(&buf[i * 8]); + } + for (int i = 16 ; i < 80; i++) { + uint64_t t = w[i - 15]; + uint64_t s0 = rot64(t, 1) ^ rot64(t, 8) ^ (t >> 7); + t = w[i - 2]; + uint64_t s1 = rot64(t, 19) ^ rot64(t, 61) ^ (t >> 6); + w[i] = w[i - 16] + s0 + w[i - 7] + s1; + } + uint64_t s[8]; + for (int i = 0; i < 8; i++) { + s[i] = h_[i]; + } + for (int i = 0; i < 80; i += 8) { + round1<0, 1, 2, 3, 4, 5, 6, 7>(s, w, i + 0); + round1<7, 0, 1, 2, 3, 4, 5, 6>(s, w, i + 1); + round1<6, 7, 0, 1, 2, 3, 4, 5>(s, w, i + 2); + round1<5, 6, 7, 0, 1, 2, 3, 4>(s, w, i + 3); + round1<4, 5, 6, 7, 0, 1, 2, 3>(s, w, i + 4); + round1<3, 4, 5, 6, 7, 0, 1, 2>(s, w, i + 5); + round1<2, 3, 4, 5, 6, 7, 0, 1>(s, w, i + 6); + round1<1, 2, 3, 4, 5, 6, 7, 0>(s, w, i + 7); + } + for (int i = 0; i < 8; i++) { + h_[i] += s[i]; + } + totalSize_ += blockSize_; + } + /* + final phase + */ + void term(const char *buf, size_t bufSize) + { + assert(bufSize < blockSize_); + const uint64_t totalSize = totalSize_ + bufSize; + + uint8_t last[blockSize_]; + memcpy(last, buf, bufSize); + memset(&last[bufSize], 0, blockSize_ - bufSize); + last[bufSize] = uint8_t(0x80); /* top bit = 1 */ + if (bufSize >= blockSize_ - 16) { + round(reinterpret_cast<const char*>(last)); + memset(last, 0, sizeof(last)); // clear stack + } + cybozu::Set64bitAsBE(&last[blockSize_ - 8], totalSize * 8); + round(reinterpret_cast<const char*>(last)); + } +public: + Sha512() + { + clear(); + } + Sha512(const void *buf, size_t bufSize) + { + clear(); + digest(buf, bufSize); + } + void clear() + { + static const uint64_t kTbl[] = { + 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, + 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, + 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, + 0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, + 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, 0x983e5152ee66dfabULL, + 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, + 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, + 0x53380d139d95b3dfULL, 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, + 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL, + 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, + 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, + 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, + 
0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, 0xca273eceea26619cULL, + 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, + 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, + 0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL + }; + k_ = kTbl; + totalSize_ = 0; + roundBufSize_ = 0; + h_[0] = 0x6a09e667f3bcc908ull; + h_[1] = 0xbb67ae8584caa73bull; + h_[2] = 0x3c6ef372fe94f82bull; + h_[3] = 0xa54ff53a5f1d36f1ull; + h_[4] = 0x510e527fade682d1ull; + h_[5] = 0x9b05688c2b3e6c1full; + h_[6] = 0x1f83d9abfb41bd6bull; + h_[7] = 0x5be0cd19137e2179ull; + } + void update(const void *buf_, size_t bufSize) + { + const char *buf = reinterpret_cast<const char*>(buf_); + if (bufSize == 0) return; + if (roundBufSize_ > 0) { + size_t size = sha2_local::min_(blockSize_ - roundBufSize_, bufSize); + memcpy(roundBuf_ + roundBufSize_, buf, size); + roundBufSize_ += size; + buf += size; + bufSize -= size; + } + if (roundBufSize_ == blockSize_) { + round(roundBuf_); + roundBufSize_ = 0; + } + while (bufSize >= blockSize_) { + assert(roundBufSize_ == 0); + round(buf); + buf += blockSize_; + bufSize -= blockSize_; + } + if (bufSize > 0) { + assert(bufSize < blockSize_); + assert(roundBufSize_ == 0); + memcpy(roundBuf_, buf, bufSize); + roundBufSize_ = bufSize; + } + assert(roundBufSize_ < blockSize_); + } + void digest(const void *buf, size_t bufSize) + { + update(buf, bufSize); + term(roundBuf_, roundBufSize_); + } + size_t get(void *out) const + { + char *p = reinterpret_cast<char*>(out); + for (size_t i = 0; i < hSize_; i++) { + cybozu::Set64bitAsBE(&p[i * sizeof(h_[0])], h_[i]); + } + return outByteSize_; + } +#ifndef CYBOZU_DONT_USE_STRING + void digest(const std::string& str = "") + { + digest(str.c_str(), str.size()); + } + void update(const std::string& buf) + { + update(buf.c_str(), buf.size()); + } + std::string get() const + { + char out[outByteSize_]; + get(out); + return std::string(out, sizeof(out)); + } + std::string toHexStr() const + { + char buf[outByteSize_ * 2]; + sha2_local::uint64toHexStr(buf, h_, hSize_); + return std::string(buf, sizeof(buf)); + } +#endif +}; + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/stream.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/stream.hpp new file mode 100644 index 000000000..bc110bdb0 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/stream.hpp @@ -0,0 +1,267 @@ +#pragma once +/** + @file + @brief stream and line stream class + + @author MITSUNARI Shigeo(@herumi) +*/ +#ifndef CYBOZU_DONT_USE_STRING +#include <string> +#include <iosfwd> +#endif +#include <cybozu/exception.hpp> +#include <memory.h> + +namespace cybozu { + +namespace stream_local { + +template <typename From, typename To> +struct is_convertible { + typedef char yes; + typedef int no; + + static no test(...); + static yes test(const To*); + static const bool value = sizeof(test(static_cast<const From*>(0))) == sizeof(yes); +}; + +template <bool b, class T = void> +struct enable_if { typedef T type; }; + +template <class T> +struct enable_if<false, T> {}; + +#ifndef CYBOZU_DONT_USE_STRING +/* specialization for istream */ +template<class InputStream> +size_t readSome_inner(void *buf, size_t size, InputStream& is, typename enable_if<is_convertible<InputStream, std::istream>::value>::type* = 0) +{ 
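Sha512 exposes the same update/digest split as Sha256; a sketch showing that incremental updates and a one-shot digest agree.

#include <cybozu/sha2.hpp>
#include <cassert>

int main()
{
	cybozu::Sha512 oneShot;
	oneShot.digest("hello world");

	cybozu::Sha512 streamed;
	streamed.update("hello ");
	streamed.update("world");
	streamed.digest(); // finalize with an empty last chunk
	assert(oneShot.toHexStr() == streamed.toHexStr());
	return 0;
}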
+ if (size > 0x7fffffff) size = 0x7fffffff; + is.read(static_cast<char *>(buf), size); + const int64_t readSize = is.gcount(); + if (readSize < 0) return 0; + if (size == 1 && readSize == 0) is.clear(); + return static_cast<size_t>(readSize); +} + +/* generic version for size_t readSome(void *, size_t) */ +template<class InputStream> +size_t readSome_inner(void *buf, size_t size, InputStream& is, typename enable_if<!is_convertible<InputStream, std::istream>::value>::type* = 0) +{ + return is.readSome(buf, size); +} +#else +template<class InputStream> +size_t readSome_inner(void *buf, size_t size, InputStream& is) +{ + return is.readSome(buf, size); +} +#endif + +#ifndef CYBOZU_DONT_USE_EXCEPTION +/* specialization for ostream */ +template<class OutputStream> +void writeSub(OutputStream& os, const void *buf, size_t size, typename enable_if<is_convertible<OutputStream, std::ostream>::value>::type* = 0) +{ + if (!os.write(static_cast<const char *>(buf), size)) throw cybozu::Exception("stream:writeSub") << size; +} +#endif + +#ifndef CYBOZU_DONT_USE_STRING +/* generic version for void write(const void*, size_t), which writes all data */ +template<class OutputStream> +void writeSub(OutputStream& os, const void *buf, size_t size, typename enable_if<!is_convertible<OutputStream, std::ostream>::value>::type* = 0) +{ + os.write(buf, size); +} + +template<class OutputStream> +void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size, typename enable_if<is_convertible<OutputStream, std::ostream>::value>::type* = 0) +{ + *pb = !!os.write(static_cast<const char *>(buf), size); +} + +/* generic version for void write(const void*, size_t), which writes all data */ +template<class OutputStream> +void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size, typename enable_if<!is_convertible<OutputStream, std::ostream>::value>::type* = 0) +{ + os.write(pb, buf, size); +} +#else +template<class OutputStream> +void writeSub(bool *pb, OutputStream& os, const void *buf, size_t size) +{ + os.write(pb, buf, size); +} +#endif + +} // stream_local + +/* + make a specializaiton of class to use new InputStream, OutputStream +*/ +template<class InputStream> +struct InputStreamTag { + static size_t readSome(void *buf, size_t size, InputStream& is) + { + return stream_local::readSome_inner<InputStream>(buf, size, is); + } + static bool readChar(char *c, InputStream& is) + { + return readSome(c, 1, is) == 1; + } +}; + +template<class OutputStream> +struct OutputStreamTag { + static void write(OutputStream& os, const void *buf, size_t size) + { + stream_local::writeSub<OutputStream>(os, buf, size); + } +}; + +class MemoryInputStream { + const char *p_; + size_t size_; + size_t pos; +public: + MemoryInputStream(const void *p, size_t size) : p_(static_cast<const char *>(p)), size_(size), pos(0) {} + size_t readSome(void *buf, size_t size) + { + if (size > size_ - pos) size = size_ - pos; + memcpy(buf, p_ + pos, size); + pos += size; + return size; + } + size_t getPos() const { return pos; } +}; + +class MemoryOutputStream { + char *p_; + size_t size_; + size_t pos; +public: + MemoryOutputStream(void *p, size_t size) : p_(static_cast<char *>(p)), size_(size), pos(0) {} + void write(bool *pb, const void *buf, size_t size) + { + if (size > size_ - pos) { + *pb = false; + return; + } + memcpy(p_ + pos, buf, size); + pos += size; + *pb = true; + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void write(const void *buf, size_t size) + { + bool b; + write(&b, buf, size); + if (!b) throw 
cybozu::Exception("MemoryOutputStream:write") << size << size_ << pos; + } +#endif + size_t getPos() const { return pos; } +}; + +#ifndef CYBOZU_DONT_USE_STRING +class StringInputStream { + const std::string& str_; + size_t pos; + StringInputStream(const StringInputStream&); + void operator=(const StringInputStream&); +public: + explicit StringInputStream(const std::string& str) : str_(str), pos(0) {} + size_t readSome(void *buf, size_t size) + { + const size_t remainSize = str_.size() - pos; + if (size > remainSize) size = remainSize; + memcpy(buf, &str_[pos], size); + pos += size; + return size; + } + size_t getPos() const { return pos; } +}; + +class StringOutputStream { + std::string& str_; + StringOutputStream(const StringOutputStream&); + void operator=(const StringOutputStream&); +public: + explicit StringOutputStream(std::string& str) : str_(str) {} + void write(bool *pb, const void *buf, size_t size) + { + str_.append(static_cast<const char *>(buf), size); + *pb = true; + } + void write(const void *buf, size_t size) + { + str_.append(static_cast<const char *>(buf), size); + } + size_t getPos() const { return str_.size(); } +}; +#endif + +template<class InputStream> +size_t readSome(void *buf, size_t size, InputStream& is) +{ + return stream_local::readSome_inner(buf, size, is); +} + +template<class OutputStream> +void write(OutputStream& os, const void *buf, size_t size) +{ + stream_local::writeSub(os, buf, size); +} + +template<class OutputStream> +void write(bool *pb, OutputStream& os, const void *buf, size_t size) +{ + stream_local::writeSub(pb, os, buf, size); +} + +template<typename InputStream> +void read(bool *pb, void *buf, size_t size, InputStream& is) +{ + char *p = static_cast<char*>(buf); + while (size > 0) { + size_t readSize = cybozu::readSome(p, size, is); + if (readSize == 0) { + *pb = false; + return; + } + p += readSize; + size -= readSize; + } + *pb = true; +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +template<typename InputStream> +void read(void *buf, size_t size, InputStream& is) +{ + bool b; + read(&b, buf, size, is); + if (!b) throw cybozu::Exception("stream:read"); +} +#endif + +template<class InputStream> +bool readChar(char *c, InputStream& is) +{ + return readSome(c, 1, is) == 1; +} + +template<class OutputStream> +void writeChar(OutputStream& os, char c) +{ + cybozu::write(os, &c, 1); +} + +template<class OutputStream> +void writeChar(bool *pb, OutputStream& os, char c) +{ + cybozu::write(pb, os, &c, 1); +} + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/test.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/test.hpp new file mode 100644 index 000000000..7dfffab96 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/test.hpp @@ -0,0 +1,373 @@ +#pragma once +/** + @file + @brief unit test class + + @author MITSUNARI Shigeo(@herumi) +*/ + +#include <stdio.h> +#include <string.h> +#include <string> +#include <list> +#include <iostream> +#include <utility> +#if defined(_MSC_VER) && (MSC_VER <= 1500) + #include <cybozu/inttype.hpp> +#else + #include <stdint.h> +#endif + +namespace cybozu { namespace test { + +class AutoRun { + typedef void (*Func)(); + typedef std::list<std::pair<const char*, Func> > UnitTestList; +public: + AutoRun() + : init_(0) + , term_(0) + , okCount_(0) + , ngCount_(0) + , exceptionCount_(0) + { + } + void setup(Func init, Func term) + { + init_ = init; + term_ = term; + } + void append(const char *name, Func func) + { + list_.push_back(std::make_pair(name, func)); + } + 
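A small sketch tying the stream classes above together: write into a std::string through StringOutputStream, then read it back with the free read/readSome helpers.

#include <cybozu/stream.hpp>
#include <cassert>
#include <cstring>
#include <string>

int main()
{
	std::string buf;
	cybozu::StringOutputStream os(buf);
	cybozu::write(os, "hello", 5); // appends to buf

	char tmp[5];
	cybozu::StringInputStream is(buf);
	cybozu::read(tmp, sizeof(tmp), is); // throws cybozu::Exception on a short read
	assert(memcmp(tmp, "hello", 5) == 0);
	assert(is.getPos() == 5);
	return 0;
}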
void set(bool isOK) + { + if (isOK) { + okCount_++; + } else { + ngCount_++; + } + } + std::string getBaseName(const std::string& name) const + { +#ifdef _WIN32 + const char sep = '\\'; +#else + const char sep = '/'; +#endif + size_t pos = name.find_last_of(sep); + std::string ret = name.substr(pos + 1); + pos = ret.find('.'); + return ret.substr(0, pos); + } + int run(int, char *argv[]) + { + std::string msg; + try { + if (init_) init_(); + for (UnitTestList::const_iterator i = list_.begin(), ie = list_.end(); i != ie; ++i) { + std::cout << "ctest:module=" << i->first << std::endl; + try { + (i->second)(); + } catch (std::exception& e) { + exceptionCount_++; + std::cout << "ctest: " << i->first << " is stopped by exception " << e.what() << std::endl; + } catch (...) { + exceptionCount_++; + std::cout << "ctest: " << i->first << " is stopped by unknown exception" << std::endl; + } + } + if (term_) term_(); + } catch (std::exception& e) { + msg = std::string("ctest:err:") + e.what(); + } catch (...) { + msg = "ctest:err: catch unknown exception"; + } + fflush(stdout); + if (msg.empty()) { + int err = ngCount_ + exceptionCount_; + int total = okCount_ + err; + std::cout << "ctest:name=" << getBaseName(*argv) + << ", module=" << list_.size() + << ", total=" << total + << ", ok=" << okCount_ + << ", ng=" << ngCount_ + << ", exception=" << exceptionCount_ << std::endl; + return err > 0 ? 1 : 0; + } else { + std::cout << msg << std::endl; + return 1; + } + } + static inline AutoRun& getInstance() + { + static AutoRun instance; + return instance; + } +private: + Func init_; + Func term_; + int okCount_; + int ngCount_; + int exceptionCount_; + UnitTestList list_; +}; + +static AutoRun& autoRun = AutoRun::getInstance(); + +inline void test(bool ret, const std::string& msg, const std::string& param, const char *file, int line) +{ + autoRun.set(ret); + if (!ret) { + printf("%s(%d):ctest:%s(%s);\n", file, line, msg.c_str(), param.c_str()); + } +} + +template<typename T, typename U> +bool isEqual(const T& lhs, const U& rhs) +{ + return lhs == rhs; +} + +// avoid warning of comparision of integers of different signs +inline bool isEqual(size_t lhs, int rhs) +{ + return lhs == size_t(rhs); +} +inline bool isEqual(int lhs, size_t rhs) +{ + return size_t(lhs) == rhs; +} +inline bool isEqual(const char *lhs, const char *rhs) +{ + return strcmp(lhs, rhs) == 0; +} +inline bool isEqual(char *lhs, const char *rhs) +{ + return strcmp(lhs, rhs) == 0; +} +inline bool isEqual(const char *lhs, char *rhs) +{ + return strcmp(lhs, rhs) == 0; +} +inline bool isEqual(char *lhs, char *rhs) +{ + return strcmp(lhs, rhs) == 0; +} +// avoid to compare float directly +inline bool isEqual(float lhs, float rhs) +{ + union fi { + float f; + uint32_t i; + } lfi, rfi; + lfi.f = lhs; + rfi.f = rhs; + return lfi.i == rfi.i; +} +// avoid to compare double directly +inline bool isEqual(double lhs, double rhs) +{ + union di { + double d; + uint64_t i; + } ldi, rdi; + ldi.d = lhs; + rdi.d = rhs; + return ldi.i == rdi.i; +} + +} } // cybozu::test + +#ifndef CYBOZU_TEST_DISABLE_AUTO_RUN +int main(int argc, char *argv[]) +{ + return cybozu::test::autoRun.run(argc, argv); +} +#endif + +/** + alert if !x + @param x [in] +*/ +#define CYBOZU_TEST_ASSERT(x) cybozu::test::test(!!(x), "CYBOZU_TEST_ASSERT", #x, __FILE__, __LINE__) + +/** + alert if x != y + @param x [in] + @param y [in] +*/ +#define CYBOZU_TEST_EQUAL(x, y) { \ + bool _cybozu_eq = cybozu::test::isEqual(x, y); \ + cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL", #x ", " #y, 
__FILE__, __LINE__); \ + if (!_cybozu_eq) { \ + std::cout << "ctest: lhs=" << (x) << std::endl; \ + std::cout << "ctest: rhs=" << (y) << std::endl; \ + } \ +} +/** + alert if fabs(x, y) >= eps + @param x [in] + @param y [in] +*/ +#define CYBOZU_TEST_NEAR(x, y, eps) { \ + bool _cybozu_isNear = fabs((x) - (y)) < eps; \ + cybozu::test::test(_cybozu_isNear, "CYBOZU_TEST_NEAR", #x ", " #y, __FILE__, __LINE__); \ + if (!_cybozu_isNear) { \ + std::cout << "ctest: lhs=" << (x) << std::endl; \ + std::cout << "ctest: rhs=" << (y) << std::endl; \ + } \ +} + +#define CYBOZU_TEST_EQUAL_POINTER(x, y) { \ + bool _cybozu_eq = x == y; \ + cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL_POINTER", #x ", " #y, __FILE__, __LINE__); \ + if (!_cybozu_eq) { \ + std::cout << "ctest: lhs=" << static_cast<const void*>(x) << std::endl; \ + std::cout << "ctest: rhs=" << static_cast<const void*>(y) << std::endl; \ + } \ +} +/** + alert if x[] != y[] + @param x [in] + @param y [in] + @param n [in] +*/ +#define CYBOZU_TEST_EQUAL_ARRAY(x, y, n) { \ + for (size_t _cybozu_test_i = 0, _cybozu_ie = (size_t)(n); _cybozu_test_i < _cybozu_ie; _cybozu_test_i++) { \ + bool _cybozu_eq = cybozu::test::isEqual((x)[_cybozu_test_i], (y)[_cybozu_test_i]); \ + cybozu::test::test(_cybozu_eq, "CYBOZU_TEST_EQUAL_ARRAY", #x ", " #y ", " #n, __FILE__, __LINE__); \ + if (!_cybozu_eq) { \ + std::cout << "ctest: i=" << _cybozu_test_i << std::endl; \ + std::cout << "ctest: lhs=" << (x)[_cybozu_test_i] << std::endl; \ + std::cout << "ctest: rhs=" << (y)[_cybozu_test_i] << std::endl; \ + } \ + } \ +} + +/** + always alert + @param msg [in] +*/ +#define CYBOZU_TEST_FAIL(msg) cybozu::test::test(false, "CYBOZU_TEST_FAIL", msg, __FILE__, __LINE__) + +/** + verify message in exception +*/ +#define CYBOZU_TEST_EXCEPTION_MESSAGE(statement, Exception, msg) \ +{ \ + int _cybozu_ret = 0; \ + std::string _cybozu_errMsg; \ + try { \ + statement; \ + _cybozu_ret = 1; \ + } catch (const Exception& _cybozu_e) { \ + _cybozu_errMsg = _cybozu_e.what(); \ + if (_cybozu_errMsg.find(msg) == std::string::npos) { \ + _cybozu_ret = 2; \ + } \ + } catch (...) { \ + _cybozu_ret = 3; \ + } \ + if (_cybozu_ret) { \ + cybozu::test::test(false, "CYBOZU_TEST_EXCEPTION_MESSAGE", #statement ", " #Exception ", " #msg, __FILE__, __LINE__); \ + if (_cybozu_ret == 1) { \ + std::cout << "ctest: no exception" << std::endl; \ + } else if (_cybozu_ret == 2) { \ + std::cout << "ctest: bad exception msg:" << _cybozu_errMsg << std::endl; \ + } else { \ + std::cout << "ctest: unexpected exception" << std::endl; \ + } \ + } else { \ + cybozu::test::autoRun.set(true); \ + } \ +} + +#define CYBOZU_TEST_EXCEPTION(statement, Exception) \ +{ \ + int _cybozu_ret = 0; \ + try { \ + statement; \ + _cybozu_ret = 1; \ + } catch (const Exception&) { \ + } catch (...) { \ + _cybozu_ret = 2; \ + } \ + if (_cybozu_ret) { \ + cybozu::test::test(false, "CYBOZU_TEST_EXCEPTION", #statement ", " #Exception, __FILE__, __LINE__); \ + if (_cybozu_ret == 1) { \ + std::cout << "ctest: no exception" << std::endl; \ + } else { \ + std::cout << "ctest: unexpected exception" << std::endl; \ + } \ + } else { \ + cybozu::test::autoRun.set(true); \ + } \ +} + +/** + verify statement does not throw +*/ +#define CYBOZU_TEST_NO_EXCEPTION(statement) \ +try { \ + statement; \ + cybozu::test::autoRun.set(true); \ +} catch (...) 
{ \ + cybozu::test::test(false, "CYBOZU_TEST_NO_EXCEPTION", #statement, __FILE__, __LINE__); \ +} + +/** + append auto unit test + @param name [in] module name +*/ +#define CYBOZU_TEST_AUTO(name) \ +void cybozu_test_ ## name(); \ +struct cybozu_test_local_ ## name { \ + cybozu_test_local_ ## name() \ + { \ + cybozu::test::autoRun.append(#name, cybozu_test_ ## name); \ + } \ +} cybozu_test_local_instance_ ## name; \ +void cybozu_test_ ## name() + +/** + append auto unit test with fixture + @param name [in] module name +*/ +#define CYBOZU_TEST_AUTO_WITH_FIXTURE(name, Fixture) \ +void cybozu_test_ ## name(); \ +void cybozu_test_real_ ## name() \ +{ \ + Fixture f; \ + cybozu_test_ ## name(); \ +} \ +struct cybozu_test_local_ ## name { \ + cybozu_test_local_ ## name() \ + { \ + cybozu::test::autoRun.append(#name, cybozu_test_real_ ## name); \ + } \ +} cybozu_test_local_instance_ ## name; \ +void cybozu_test_ ## name() + +/** + setup fixture + @param Fixture [in] class name of fixture + @note cstr of Fixture is called before test and dstr of Fixture is called after test +*/ +#define CYBOZU_TEST_SETUP_FIXTURE(Fixture) \ +Fixture *cybozu_test_local_fixture; \ +void cybozu_test_local_init() \ +{ \ + cybozu_test_local_fixture = new Fixture(); \ +} \ +void cybozu_test_local_term() \ +{ \ + delete cybozu_test_local_fixture; \ +} \ +struct cybozu_test_local_fixture_setup_ { \ + cybozu_test_local_fixture_setup_() \ + { \ + cybozu::test::autoRun.setup(cybozu_test_local_init, cybozu_test_local_term); \ + } \ +} cybozu_test_local_fixture_setup_instance_; diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/unordered_map.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/unordered_map.hpp new file mode 100644 index 000000000..89f8f8774 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/unordered_map.hpp @@ -0,0 +1,13 @@ +#pragma once + +#include <cybozu/inttype.hpp> + +#ifdef CYBOZU_USE_BOOST + #include <boost/unordered_map.hpp> +#elif (CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11) || (defined __APPLE__) + #include <unordered_map> +#elif (CYBOZU_CPP_VERSION == CYBOZU_CPP_VERSION_TR1) + #include <list> + #include <tr1/unordered_map> +#endif + diff --git a/vendor/github.com/dexon-foundation/mcl/include/cybozu/xorshift.hpp b/vendor/github.com/dexon-foundation/mcl/include/cybozu/xorshift.hpp new file mode 100644 index 000000000..69edaa939 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/cybozu/xorshift.hpp @@ -0,0 +1,172 @@ +#pragma once +/** + @file + @brief XorShift + + @author MITSUNARI Shigeo(@herumi) + @author MITSUNARI Shigeo +*/ +#include <cybozu/inttype.hpp> + +namespace cybozu { + +class XorShift { + uint32_t x_, y_, z_, w_; +public: + explicit XorShift(uint32_t x = 0, uint32_t y = 0, uint32_t z = 0, uint32_t w = 0) + { + init(x, y, z, w); + } + void init(uint32_t x = 0, uint32_t y = 0, uint32_t z = 0, uint32_t w = 0) + { + x_ = x ? x : 123456789; + y_ = y ? y : 362436069; + z_ = z ? z : 521288629; + w_ = w ? 
w : 88675123; + } + uint32_t get32() + { + unsigned int t = x_ ^ (x_ << 11); + x_ = y_; y_ = z_; z_ = w_; + return w_ = (w_ ^ (w_ >> 19)) ^ (t ^ (t >> 8)); + } + uint32_t operator()() { return get32(); } + uint64_t get64() + { + uint32_t a = get32(); + uint32_t b = get32(); + return (uint64_t(a) << 32) | b; + } + template<class T> + void read(T *x, size_t n) + { + const size_t size = sizeof(T) * n; + uint8_t *p8 = static_cast<uint8_t*>(x); + for (size_t i = 0; i < size; i++) { + p8[i] = static_cast<uint8_t>(get32()); + } + } + void read(uint32_t *x, size_t n) + { + for (size_t i = 0; i < n; i++) { + x[i] = get32(); + } + } + void read(uint64_t *x, size_t n) + { + for (size_t i = 0; i < n; i++) { + x[i] = get64(); + } + } +}; + +// see http://xorshift.di.unimi.it/xorshift128plus.c +class XorShift128Plus { + uint64_t s_[2]; + static const uint64_t seed0 = 123456789; + static const uint64_t seed1 = 987654321; +public: + explicit XorShift128Plus(uint64_t s0 = seed0, uint64_t s1 = seed1) + { + init(s0, s1); + } + void init(uint64_t s0 = seed0, uint64_t s1 = seed1) + { + s_[0] = s0; + s_[1] = s1; + } + uint32_t get32() + { + return static_cast<uint32_t>(get64()); + } + uint64_t operator()() { return get64(); } + uint64_t get64() + { + uint64_t s1 = s_[0]; + const uint64_t s0 = s_[1]; + s_[0] = s0; + s1 ^= s1 << 23; + s_[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5); + return s_[1] + s0; + } + template<class T> + void read(T *x, size_t n) + { + const size_t size = sizeof(T) * n; + uint8_t *p8 = static_cast<uint8_t*>(x); + for (size_t i = 0; i < size; i++) { + p8[i] = static_cast<uint8_t>(get32()); + } + } + void read(uint32_t *x, size_t n) + { + for (size_t i = 0; i < n; i++) { + x[i] = get32(); + } + } + void read(uint64_t *x, size_t n) + { + for (size_t i = 0; i < n; i++) { + x[i] = get64(); + } + } +}; + +// see http://xoroshiro.di.unimi.it/xoroshiro128plus.c +class Xoroshiro128Plus { + uint64_t s_[2]; + static const uint64_t seed0 = 123456789; + static const uint64_t seed1 = 987654321; + uint64_t rotl(uint64_t x, unsigned int k) const + { + return (x << k) | (x >> (64 - k)); + } +public: + explicit Xoroshiro128Plus(uint64_t s0 = seed0, uint64_t s1 = seed1) + { + init(s0, s1); + } + void init(uint64_t s0 = seed0, uint64_t s1 = seed1) + { + s_[0] = s0; + s_[1] = s1; + } + uint32_t get32() + { + return static_cast<uint32_t>(get64()); + } + uint64_t operator()() { return get64(); } + uint64_t get64() + { + uint64_t s0 = s_[0]; + uint64_t s1 = s_[1]; + uint64_t result = s0 + s1; + s1 ^= s0; + s_[0] = rotl(s0, 55) ^ s1 ^ (s1 << 14); + s_[1] = rotl(s1, 36); + return result; + } + template<class T> + void read(T *x, size_t n) + { + const size_t size = sizeof(T) * n; + uint8_t *p8 = static_cast<uint8_t*>(x); + for (size_t i = 0; i < size; i++) { + p8[i] = static_cast<uint8_t>(get32()); + } + } + void read(uint32_t *x, size_t n) + { + for (size_t i = 0; i < n; i++) { + x[i] = get32(); + } + } + void read(uint64_t *x, size_t n) + { + for (size_t i = 0; i < n; i++) { + x[i] = get64(); + } + } +}; + +} // cybozu diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/aggregate_sig.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/aggregate_sig.hpp new file mode 100644 index 000000000..f31405705 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/aggregate_sig.hpp @@ -0,0 +1,265 @@ +#pragma once +/** + @file + @brief aggregate signature + @author MITSUNARI Shigeo(@herumi) + see http://crypto.stanford.edu/~dabo/papers/aggreg.pdf + @license modified new BSD license + 
http://opensource.org/licenses/BSD-3-Clause +*/ +#include <cmath> +#include <vector> +#include <iosfwd> +#include <set> +#ifndef MCLBN_FP_UNIT_SIZE + #define MCLBN_FP_UNIT_SIZE 4 +#endif +#if MCLBN_FP_UNIT_SIZE == 4 +#include <mcl/bn256.hpp> +namespace mcl { +using namespace mcl::bn256; +} +#elif MCLBN_FP_UNIT_SIZE == 6 +#include <mcl/bn384.hpp> +namespace mcl { +using namespace mcl::bn384; +} +#elif MCLBN_FP_UNIT_SIZE == 8 +#include <mcl/bn512.hpp> +namespace mcl { +using namespace mcl::bn512; +} +#else + #error "MCLBN_FP_UNIT_SIZE must be 4, 6, or 8" +#endif + +namespace mcl { namespace aggs { + +/* + AGGregate Signature Template class +*/ +template<size_t dummyImpl = 0> +struct AGGST { + typedef typename G1::BaseFp Fp; + + class SecretKey; + class PublicKey; + class Signature; + + static G1 P_; + static G2 Q_; + static std::vector<Fp6> Qcoeff_; +public: + static void init(const mcl::CurveParam& cp = mcl::BN254) + { + initPairing(cp); + hashAndMapToG1(P_, "0"); + hashAndMapToG2(Q_, "0"); + precomputeG2(Qcoeff_, Q_); + } + class Signature : public fp::Serializable<Signature> { + G1 S_; + friend class SecretKey; + friend class PublicKey; + public: + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + S_.load(is, ioMode); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + S_.save(os, ioMode); + } + friend std::istream& operator>>(std::istream& is, Signature& self) + { + self.load(is, fp::detectIoMode(G1::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const Signature& self) + { + self.save(os, fp::detectIoMode(G1::getIoMode(), os)); + return os; + } + bool operator==(const Signature& rhs) const + { + return S_ == rhs.S_; + } + bool operator!=(const Signature& rhs) const { return !operator==(rhs); } + /* + aggregate sig[0..n) and set *this + */ + void aggregate(const Signature *sig, size_t n) + { + G1 S; + S.clear(); + for (size_t i = 0; i < n; i++) { + S += sig[i].S_; + } + S_ = S; + } + void aggregate(const std::vector<Signature>& sig) + { + aggregate(sig.data(), sig.size()); + } + /* + aggregate verification + */ + bool verify(const void *const *msgVec, const size_t *sizeVec, const PublicKey *pubVec, size_t n) const + { + if (n == 0) return false; + typedef std::set<Fp> FpSet; + FpSet msgSet; + typedef std::vector<G1> G1Vec; + G1Vec hv(n); + for (size_t i = 0; i < n; i++) { + Fp h; + h.setHashOf(msgVec[i], sizeVec[i]); + std::pair<typename FpSet::iterator, bool> ret = msgSet.insert(h); + if (!ret.second) throw cybozu::Exception("aggs::verify:same msg"); + mapToG1(hv[i], h); + } + /* + e(aggSig, xQ) = prod_i e(hv[i], pub[i].Q) + <=> finalExp(e(-aggSig, xQ) * prod_i millerLoop(hv[i], pub[i].xQ)) == 1 + */ + GT e1, e2; + precomputedMillerLoop(e1, -S_, Qcoeff_); + millerLoop(e2, hv[0], pubVec[0].xQ_); + for (size_t i = 1; i < n; i++) { + GT e; + millerLoop(e, hv[i], pubVec[i].xQ_); + e2 *= e; + } + e1 *= e2; + finalExp(e1, e1); + return e1.isOne(); + } + bool verify(const std::vector<std::string>& msgVec, const std::vector<PublicKey>& pubVec) const + { + const size_t n = msgVec.size(); + if (n != pubVec.size()) throw cybozu::Exception("aggs:Signature:verify:bad size") << msgVec.size() << pubVec.size(); + if (n == 0) return false; + std::vector<const void*> mv(n); + std::vector<size_t> sv(n); + for (size_t i = 0; i < n; i++) { + mv[i] = msgVec[i].c_str(); + sv[i] = msgVec[i].size(); + } + return verify(&mv[0], &sv[0], &pubVec[0], n); + } + }; + class PublicKey : 
public fp::Serializable<PublicKey> { + G2 xQ_; + friend class SecretKey; + friend class Signature; + public: + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + xQ_.load(is, ioMode); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + xQ_.save(os, ioMode); + } + friend std::istream& operator>>(std::istream& is, PublicKey& self) + { + self.load(is, fp::detectIoMode(G2::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const PublicKey& self) + { + self.save(os, fp::detectIoMode(G2::getIoMode(), os)); + return os; + } + bool operator==(const PublicKey& rhs) const + { + return xQ_ == rhs.xQ_; + } + bool operator!=(const PublicKey& rhs) const { return !operator==(rhs); } + bool verify(const Signature& sig, const void *m, size_t mSize) const + { + /* + H = hash(m) + e(S, Q) = e(H, xQ) where S = xH + <=> e(S, Q)e(-H, xQ) = 1 + <=> finalExp(millerLoop(S, Q)e(-H, x)) = 1 + */ + G1 H; + hashAndMapToG1(H, m, mSize); + G1::neg(H, H); + GT e1, e2; + precomputedMillerLoop(e1, sig.S_, Qcoeff_); + millerLoop(e2, H, xQ_); + e1 *= e2; + finalExp(e1, e1); + return e1.isOne(); + } + bool verify(const Signature& sig, const std::string& m) const + { + return verify(sig, m.c_str(), m.size()); + } + }; + class SecretKey : public fp::Serializable<SecretKey> { + Fr x_; + friend class PublicKey; + friend class Signature; + public: + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + x_.load(is, ioMode); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + x_.save(os, ioMode); + } + friend std::istream& operator>>(std::istream& is, SecretKey& self) + { + self.load(is, fp::detectIoMode(Fr::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const SecretKey& self) + { + self.save(os, fp::detectIoMode(Fr::getIoMode(), os)); + return os; + } + bool operator==(const SecretKey& rhs) const + { + return x_ == rhs.x_; + } + bool operator!=(const SecretKey& rhs) const { return !operator==(rhs); } + void init() + { + x_.setByCSPRNG(); + } + void getPublicKey(PublicKey& pub) const + { + G2::mul(pub.xQ_, Q_, x_); + } + void sign(Signature& sig, const void *m, size_t mSize) const + { + hashAndMapToG1(sig.S_, m, mSize); + G1::mul(sig.S_, sig.S_, x_); + } + void sign(Signature& sig, const std::string& m) const + { + sign(sig, m.c_str(), m.size()); + } + }; +}; + +template<size_t dummyImpl> G1 AGGST<dummyImpl>::P_; +template<size_t dummyImpl> G2 AGGST<dummyImpl>::Q_; +template<size_t dummyImpl> std::vector<Fp6> AGGST<dummyImpl>::Qcoeff_; + +typedef AGGST<> AGGS; +typedef AGGS::SecretKey SecretKey; +typedef AGGS::PublicKey PublicKey; +typedef AGGS::Signature Signature; + +} } // mcl::aggs diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/ahe.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/ahe.hpp new file mode 100644 index 000000000..239319d0d --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/ahe.hpp @@ -0,0 +1,76 @@ +#pragma once +/** + @file + @brief 192/256-bit additive homomorphic encryption by lifted-ElGamal + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <mcl/elgamal.hpp> +#include <mcl/ecparam.hpp> + +namespace mcl { + +#ifdef MCL_USE_AHE192 +namespace ahe192 { + +const mcl::EcParam& para = mcl::ecparam::NIST_P192; + +typedef mcl::FpT<mcl::FpTag, 192> Fp; 
+typedef mcl::FpT<mcl::ZnTag, 192> Zn; +typedef mcl::EcT<Fp> Ec; +typedef mcl::ElgamalT<Ec, Zn> ElgamalEc; +typedef ElgamalEc::PrivateKey SecretKey; +typedef ElgamalEc::PublicKey PublicKey; +typedef ElgamalEc::CipherText CipherText; + +static inline void initAhe() +{ + Fp::init(para.p); + Zn::init(para.n); + Ec::init(para.a, para.b); + Ec::setIoMode(16); + Zn::setIoMode(16); +} + +static inline void initSecretKey(SecretKey& sec) +{ + const Ec P(Fp(para.gx), Fp(para.gy)); + sec.init(P, Zn::getBitSize()); +} + +} //mcl::ahe192 +#endif + +#ifdef MCL_USE_AHE256 +namespace ahe256 { + +const mcl::EcParam& para = mcl::ecparam::NIST_P256; + +typedef mcl::FpT<mcl::FpTag, 256> Fp; +typedef mcl::FpT<mcl::ZnTag, 256> Zn; +typedef mcl::EcT<Fp> Ec; +typedef mcl::ElgamalT<Ec, Zn> ElgamalEc; +typedef ElgamalEc::PrivateKey SecretKey; +typedef ElgamalEc::PublicKey PublicKey; +typedef ElgamalEc::CipherText CipherText; + +static inline void initAhe() +{ + Fp::init(para.p); + Zn::init(para.n); + Ec::init(para.a, para.b); + Ec::setIoMode(16); + Zn::setIoMode(16); +} + +static inline void initSecretKey(SecretKey& sec) +{ + const Ec P(Fp(para.gx), Fp(para.gy)); + sec.init(P, Zn::getBitSize()); +} + +} //mcl::ahe256 +#endif + +} // mcl diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/array.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/array.hpp new file mode 100644 index 000000000..84c5f3765 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/array.hpp @@ -0,0 +1,146 @@ +#pragma once +/** + @file + @brief tiny vector class + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <stdlib.h> +#include <stddef.h> + +namespace mcl { + +template<class T> +class Array { + T *p_; + size_t n_; + Array(const Array&); + void operator=(const Array&); + template<class U> + void swap_(U& x, U& y) const + { + U t; + t = x; + x = y; + y = t; + } +public: + Array() : p_(0), n_(0) {} + ~Array() + { + free(p_); + } + bool resize(size_t n) + { + if (n <= n_) { + n_ = n; + if (n == 0) { + free(p_); + p_ = 0; + } + return true; + } + T *q = (T*)malloc(sizeof(T) * n); + if (q == 0) return false; + for (size_t i = 0; i < n_; i++) { + q[i] = p_[i]; + } + free(p_); + p_ = q; + n_ = n; + return true; + } + bool copy(const Array<T>& rhs) + { + if (this == &rhs) return true; + if (n_ < rhs.n_) { + clear(); + if (!resize(rhs.n_)) return false; + } + for (size_t i = 0; i < rhs.n_; i++) { + p_[i] = rhs.p_[i]; + } + n_ = rhs.n_; + return true; + } + void clear() + { + free(p_); + p_ = 0; + n_ = 0; + } + size_t size() const { return n_; } + void swap(Array<T>& rhs) + { + swap_(p_, rhs.p_); + swap_(n_, rhs.n_); + } + T& operator[](size_t n) { return p_[n]; } + const T& operator[](size_t n) const { return p_[n]; } + T* data() { return p_; } + const T* data() const { return p_; } +}; + +template<class T, size_t maxSize> +class FixedArray { + T p_[maxSize]; + size_t n_; + FixedArray(const FixedArray&); + void operator=(const FixedArray&); + template<class U> + void swap_(U& x, U& y) const + { + U t; + t = x; + x = y; + y = t; + } +public: + FixedArray() : n_(0) {} + bool resize(size_t n) + { + if (n > maxSize) return false; + n_ = n; + return true; + } + bool copy(const FixedArray<T, maxSize>& rhs) + { + if (this == &rhs) return true; + for (size_t i = 0; i < rhs.n_; i++) { + p_[i] = rhs.p_[i]; + } + n_ = rhs.n_; + return true; + } + void clear() + { + n_ = 0; + } + size_t size() const { return n_; } + void swap(FixedArray<T, 
maxSize>& rhs) + { + T *minP = p_; + size_t minN = n_; + T *maxP = rhs.p_; + size_t maxN = rhs.n_; + if (minP > maxP) { + swap_(minP, maxP); + swap_(minN, maxN); + } + for (size_t i = 0; i < minN; i++) { + swap_(minP[i], maxP[i]); + } + for (size_t i = minN; i < maxN; i++) { + minP[i] = maxP[i]; + } + swap_(n_, rhs.n_); + } + T& operator[](size_t n) { return p_[n]; } + const T& operator[](size_t n) const { return p_[n]; } + T* data() { return p_; } + const T* data() const { return p_; } +}; + +} // mcl + diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bls12_381.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/bls12_381.hpp new file mode 100644 index 000000000..316e142af --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/bls12_381.hpp @@ -0,0 +1,15 @@ +#pragma once +/** + @file + @brief preset class for BLS12-381 pairing + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#define MCL_MAX_FP_BIT_SIZE 384 +#define MCL_MAX_FR_BIT_SIZE 256 +#include <mcl/bn.hpp> + +namespace mcl { namespace bls12 { +using namespace mcl::bn; +} } diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.h b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.h new file mode 100644 index 000000000..9c78f92f1 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.h @@ -0,0 +1,368 @@ +#pragma once +/** + @file + @brief C interface of 256/384-bit optimal ate pairing over BN curves + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +/* + the order of an elliptic curve over Fp is Fr +*/ +#ifndef MCLBN_FP_UNIT_SIZE + #error "define MCLBN_FP_UNIT_SIZE 4(, 6 or 8)" +#endif +#ifndef MCLBN_FR_UNIT_SIZE + #define MCLBN_FR_UNIT_SIZE MCLBN_FP_UNIT_SIZE +#endif +#define MCLBN_COMPILED_TIME_VAR ((MCLBN_FR_UNIT_SIZE) * 10 + (MCLBN_FP_UNIT_SIZE)) + +#include <stdint.h> // for uint64_t, uint8_t +#include <stdlib.h> // for size_t + + +#if defined(_MSC_VER) + #ifdef MCLBN_DONT_EXPORT + #define MCLBN_DLL_API + #else + #ifdef MCLBN_DLL_EXPORT + #define MCLBN_DLL_API __declspec(dllexport) + #else + #define MCLBN_DLL_API __declspec(dllimport) + #endif + #endif + #ifndef MCLBN_NO_AUTOLINK + #if MCLBN_FP_UNIT_SIZE == 4 + #pragma comment(lib, "mclbn256.lib") + #elif MCLBN_FP_UNIT_SIZE == 6 + #pragma comment(lib, "mclbn384.lib") + #else + #pragma comment(lib, "mclbn512.lib") + #endif + #endif +#elif defined(__EMSCRIPTEN__) && !defined(MCLBN_DONT_EXPORT) + #define MCLBN_DLL_API __attribute__((used)) +#elif defined(__wasm__) && !defined(MCLBN_DONT_EXPORT) + #define MCLBN_DLL_API __attribute__((visibility("default"))) +#else + #define MCLBN_DLL_API +#endif + +#ifdef __EMSCRIPTEN__ + // avoid 64-bit integer + #define mclSize unsigned int + #define mclInt int +#else + // use #define for cgo + #define mclSize size_t + #define mclInt int64_t +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef MCLBN_NOT_DEFINE_STRUCT + +typedef struct mclBnFr mclBnFr; +typedef struct mclBnG1 mclBnG1; +typedef struct mclBnG2 mclBnG2; +typedef struct mclBnGT mclBnGT; + +#else + +typedef struct { + uint64_t d[MCLBN_FR_UNIT_SIZE]; +} mclBnFr; + +typedef struct { + uint64_t d[MCLBN_FP_UNIT_SIZE * 3]; +} mclBnG1; + +typedef struct { + uint64_t d[MCLBN_FP_UNIT_SIZE * 2 * 3]; +} mclBnG2; + +typedef struct { + uint64_t d[MCLBN_FP_UNIT_SIZE * 12]; +} mclBnGT; + +#endif + +#include <mcl/curve_type.h> + +#define MCLBN_IO_SERIALIZE_HEX_STR 2048 +// for 
backword compatibility +enum { + mclBn_CurveFp254BNb = 0, + mclBn_CurveFp382_1 = 1, + mclBn_CurveFp382_2 = 2, + mclBn_CurveFp462 = 3, + mclBn_CurveSNARK1 = 4, + mclBls12_CurveFp381 = 5 +}; + +/* + init library + @param curve [in] type of bn curve + @param compiledTimeVar [in] specify MCLBN_COMPILED_TIME_VAR, + which macro is used to make sure that the values + are the same when the library is built and used + @return 0 if success + curve = BN254/BN_SNARK1 is allowed if maxUnitSize = 4 + curve = BN381_1/BN381_2/BLS12_381 are allowed if maxUnitSize = 6 + This parameter is used to detect a library compiled with different MCLBN_FP_UNIT_SIZE for safety. + @note not threadsafe + @note BN_init is used in libeay32 +*/ +MCLBN_DLL_API int mclBn_init(int curve, int compiledTimeVar); + + +/* + pairing : G1 x G2 -> GT + #G1 = #G2 = r + G1 is a curve defined on Fp + + serialized size of elements + |Fr| |Fp| + BN254 32 32 + BN381 48 48 + BLS12_381 32 48 + BN462 58 58 + |G1| = |Fp| + |G2| = |G1| * 2 + |GT| = |G1| * 12 +*/ +/* + return the num of Unit(=uint64_t) to store Fr +*/ +MCLBN_DLL_API int mclBn_getOpUnitSize(void); + +/* + return bytes for serialized G1(=Fp) +*/ +MCLBN_DLL_API int mclBn_getG1ByteSize(void); +/* + return bytes for serialized Fr +*/ +MCLBN_DLL_API int mclBn_getFrByteSize(void); + +/* + return decimal string of the order of the curve(=the characteristic of Fr) + return str(buf) if success +*/ +MCLBN_DLL_API mclSize mclBn_getCurveOrder(char *buf, mclSize maxBufSize); + +/* + return decimal string of the characteristic of Fp + return str(buf) if success +*/ +MCLBN_DLL_API mclSize mclBn_getFieldOrder(char *buf, mclSize maxBufSize); + +//////////////////////////////////////////////// +/* + deserialize + return read size if success else 0 +*/ +MCLBN_DLL_API mclSize mclBnFr_deserialize(mclBnFr *x, const void *buf, mclSize bufSize); +MCLBN_DLL_API mclSize mclBnG1_deserialize(mclBnG1 *x, const void *buf, mclSize bufSize); +MCLBN_DLL_API mclSize mclBnG2_deserialize(mclBnG2 *x, const void *buf, mclSize bufSize); +MCLBN_DLL_API mclSize mclBnGT_deserialize(mclBnGT *x, const void *buf, mclSize bufSize); + +/* + serialize + return written byte if sucess else 0 +*/ +MCLBN_DLL_API mclSize mclBnFr_serialize(void *buf, mclSize maxBufSize, const mclBnFr *x); +MCLBN_DLL_API mclSize mclBnG1_serialize(void *buf, mclSize maxBufSize, const mclBnG1 *x); +MCLBN_DLL_API mclSize mclBnG2_serialize(void *buf, mclSize maxBufSize, const mclBnG2 *x); +MCLBN_DLL_API mclSize mclBnGT_serialize(void *buf, mclSize maxBufSize, const mclBnGT *x); + +/* + set string + ioMode + 10 : decimal number + 16 : hexadecimal number + MCLBN_IO_SERIALIZE_HEX_STR : hex string of serialized data + return 0 if success else -1 +*/ +MCLBN_DLL_API int mclBnFr_setStr(mclBnFr *x, const char *buf, mclSize bufSize, int ioMode); +MCLBN_DLL_API int mclBnG1_setStr(mclBnG1 *x, const char *buf, mclSize bufSize, int ioMode); +MCLBN_DLL_API int mclBnG2_setStr(mclBnG2 *x, const char *buf, mclSize bufSize, int ioMode); +MCLBN_DLL_API int mclBnGT_setStr(mclBnGT *x, const char *buf, mclSize bufSize, int ioMode); + +/* + buf is terminated by '\0' + return strlen(buf) if sucess else 0 +*/ +MCLBN_DLL_API mclSize mclBnFr_getStr(char *buf, mclSize maxBufSize, const mclBnFr *x, int ioMode); +MCLBN_DLL_API mclSize mclBnG1_getStr(char *buf, mclSize maxBufSize, const mclBnG1 *x, int ioMode); +MCLBN_DLL_API mclSize mclBnG2_getStr(char *buf, mclSize maxBufSize, const mclBnG2 *x, int ioMode); +MCLBN_DLL_API mclSize mclBnGT_getStr(char *buf, mclSize maxBufSize, const 
mclBnGT *x, int ioMode); + +// set zero +MCLBN_DLL_API void mclBnFr_clear(mclBnFr *x); + +// set x to y +MCLBN_DLL_API void mclBnFr_setInt(mclBnFr *y, mclInt x); +MCLBN_DLL_API void mclBnFr_setInt32(mclBnFr *y, int x); + +// mask buf with (1 << (bitLen(r) - 1)) - 1 if buf >= r +MCLBN_DLL_API int mclBnFr_setLittleEndian(mclBnFr *x, const void *buf, mclSize bufSize); + +// return 1 if true and 0 otherwise +MCLBN_DLL_API int mclBnFr_isValid(const mclBnFr *x); +MCLBN_DLL_API int mclBnFr_isEqual(const mclBnFr *x, const mclBnFr *y); +MCLBN_DLL_API int mclBnFr_isZero(const mclBnFr *x); +MCLBN_DLL_API int mclBnFr_isOne(const mclBnFr *x); + +#ifndef MCL_DONT_USE_CSRPNG +// return 0 if success +MCLBN_DLL_API int mclBnFr_setByCSPRNG(mclBnFr *x); +#endif + +// hash(s) and set x +// return 0 if success +MCLBN_DLL_API int mclBnFr_setHashOf(mclBnFr *x, const void *buf, mclSize bufSize); + + +MCLBN_DLL_API void mclBnFr_neg(mclBnFr *y, const mclBnFr *x); +MCLBN_DLL_API void mclBnFr_inv(mclBnFr *y, const mclBnFr *x); +MCLBN_DLL_API void mclBnFr_sqr(mclBnFr *y, const mclBnFr *x); +MCLBN_DLL_API void mclBnFr_add(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); +MCLBN_DLL_API void mclBnFr_sub(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); +MCLBN_DLL_API void mclBnFr_mul(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); +MCLBN_DLL_API void mclBnFr_div(mclBnFr *z, const mclBnFr *x, const mclBnFr *y); + +//////////////////////////////////////////////// +// set zero +MCLBN_DLL_API void mclBnG1_clear(mclBnG1 *x); + + +// return 1 if true and 0 otherwise +MCLBN_DLL_API int mclBnG1_isValid(const mclBnG1 *x); +MCLBN_DLL_API int mclBnG1_isEqual(const mclBnG1 *x, const mclBnG1 *y); +MCLBN_DLL_API int mclBnG1_isZero(const mclBnG1 *x); +/* + return 1 if x has a correct order + x is valid point of G1 if and only if + mclBnG1_isValid() is true, which contains mclBnG1_isValidOrder() if mclBn_verifyOrderG1(true) + mclBnG1_isValid() && mclBnG1_isValidOrder() is true if mclBn_verifyOrderG1(false) +*/ +MCLBN_DLL_API int mclBnG1_isValidOrder(const mclBnG1 *x); + +MCLBN_DLL_API int mclBnG1_hashAndMapTo(mclBnG1 *x, const void *buf, mclSize bufSize); + + +MCLBN_DLL_API void mclBnG1_neg(mclBnG1 *y, const mclBnG1 *x); +MCLBN_DLL_API void mclBnG1_dbl(mclBnG1 *y, const mclBnG1 *x); +MCLBN_DLL_API void mclBnG1_normalize(mclBnG1 *y, const mclBnG1 *x); +MCLBN_DLL_API void mclBnG1_add(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y); +MCLBN_DLL_API void mclBnG1_sub(mclBnG1 *z, const mclBnG1 *x, const mclBnG1 *y); +MCLBN_DLL_API void mclBnG1_mul(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y); + +/* + constant time mul +*/ +MCLBN_DLL_API void mclBnG1_mulCT(mclBnG1 *z, const mclBnG1 *x, const mclBnFr *y); + +//////////////////////////////////////////////// +// set zero +MCLBN_DLL_API void mclBnG2_clear(mclBnG2 *x); + +// return 1 if true and 0 otherwise +MCLBN_DLL_API int mclBnG2_isValid(const mclBnG2 *x); +MCLBN_DLL_API int mclBnG2_isEqual(const mclBnG2 *x, const mclBnG2 *y); +MCLBN_DLL_API int mclBnG2_isZero(const mclBnG2 *x); +// return 1 if x has a correct order +MCLBN_DLL_API int mclBnG2_isValidOrder(const mclBnG2 *x); + +MCLBN_DLL_API int mclBnG2_hashAndMapTo(mclBnG2 *x, const void *buf, mclSize bufSize); + +// return written size if sucess else 0 + +MCLBN_DLL_API void mclBnG2_neg(mclBnG2 *y, const mclBnG2 *x); +MCLBN_DLL_API void mclBnG2_dbl(mclBnG2 *y, const mclBnG2 *x); +MCLBN_DLL_API void mclBnG2_normalize(mclBnG2 *y, const mclBnG2 *x); +MCLBN_DLL_API void mclBnG2_add(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y); +MCLBN_DLL_API 
void mclBnG2_sub(mclBnG2 *z, const mclBnG2 *x, const mclBnG2 *y); +MCLBN_DLL_API void mclBnG2_mul(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y); +/* + constant time mul +*/ +MCLBN_DLL_API void mclBnG2_mulCT(mclBnG2 *z, const mclBnG2 *x, const mclBnFr *y); + +//////////////////////////////////////////////// +// set zero +MCLBN_DLL_API void mclBnGT_clear(mclBnGT *x); +// set x to y +MCLBN_DLL_API void mclBnGT_setInt(mclBnGT *y, mclInt x); +MCLBN_DLL_API void mclBnGT_setInt32(mclBnGT *y, int x); + +// return 1 if true and 0 otherwise +MCLBN_DLL_API int mclBnGT_isEqual(const mclBnGT *x, const mclBnGT *y); +MCLBN_DLL_API int mclBnGT_isZero(const mclBnGT *x); +MCLBN_DLL_API int mclBnGT_isOne(const mclBnGT *x); + +MCLBN_DLL_API void mclBnGT_neg(mclBnGT *y, const mclBnGT *x); +MCLBN_DLL_API void mclBnGT_inv(mclBnGT *y, const mclBnGT *x); +MCLBN_DLL_API void mclBnGT_sqr(mclBnGT *y, const mclBnGT *x); +MCLBN_DLL_API void mclBnGT_add(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); +MCLBN_DLL_API void mclBnGT_sub(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); +MCLBN_DLL_API void mclBnGT_mul(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); +MCLBN_DLL_API void mclBnGT_div(mclBnGT *z, const mclBnGT *x, const mclBnGT *y); + +/* + pow for all elements of Fp12 +*/ +MCLBN_DLL_API void mclBnGT_powGeneric(mclBnGT *z, const mclBnGT *x, const mclBnFr *y); +/* + pow for only {x|x^r = 1} in Fp12 by GLV method + the value generated by pairing satisfies the condition +*/ +MCLBN_DLL_API void mclBnGT_pow(mclBnGT *z, const mclBnGT *x, const mclBnFr *y); + +MCLBN_DLL_API void mclBn_pairing(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y); +MCLBN_DLL_API void mclBn_finalExp(mclBnGT *y, const mclBnGT *x); +MCLBN_DLL_API void mclBn_millerLoop(mclBnGT *z, const mclBnG1 *x, const mclBnG2 *y); + +// return precomputedQcoeffSize * sizeof(Fp6) / sizeof(uint64_t) +MCLBN_DLL_API int mclBn_getUint64NumToPrecompute(void); + +// allocate Qbuf[MCLBN_getUint64NumToPrecompute()] before calling this +MCLBN_DLL_API void mclBn_precomputeG2(uint64_t *Qbuf, const mclBnG2 *Q); + +MCLBN_DLL_API void mclBn_precomputedMillerLoop(mclBnGT *f, const mclBnG1 *P, const uint64_t *Qbuf); +MCLBN_DLL_API void mclBn_precomputedMillerLoop2(mclBnGT *f, const mclBnG1 *P1, const uint64_t *Q1buf, const mclBnG1 *P2, const uint64_t *Q2buf); +MCLBN_DLL_API void mclBn_precomputedMillerLoop2mixed(mclBnGT *f, const mclBnG1 *P1, const mclBnG2 *Q1, const mclBnG1 *P2, const uint64_t *Q2buf); + +/* + Lagrange interpolation + recover out = y(0) by { (xVec[i], yVec[i]) } + return 0 if success else -1 + @note k >= 2, xVec[i] != 0, xVec[i] != xVec[j] for i != j +*/ +MCLBN_DLL_API int mclBn_FrLagrangeInterpolation(mclBnFr *out, const mclBnFr *xVec, const mclBnFr *yVec, mclSize k); +MCLBN_DLL_API int mclBn_G1LagrangeInterpolation(mclBnG1 *out, const mclBnFr *xVec, const mclBnG1 *yVec, mclSize k); +MCLBN_DLL_API int mclBn_G2LagrangeInterpolation(mclBnG2 *out, const mclBnFr *xVec, const mclBnG2 *yVec, mclSize k); + +/* + evaluate polynomial + out = f(x) = c[0] + c[1] * x + c[2] * x^2 + ... 
+ c[cSize - 1] * x^(cSize - 1) + @note cSize >= 2 +*/ +MCLBN_DLL_API int mclBn_FrEvaluatePolynomial(mclBnFr *out, const mclBnFr *cVec, mclSize cSize, const mclBnFr *x); +MCLBN_DLL_API int mclBn_G1EvaluatePolynomial(mclBnG1 *out, const mclBnG1 *cVec, mclSize cSize, const mclBnFr *x); +MCLBN_DLL_API int mclBn_G2EvaluatePolynomial(mclBnG2 *out, const mclBnG2 *cVec, mclSize cSize, const mclBnFr *x); + +/* + verify whether a point of an elliptic curve has order r + This api affetcs setStr(), deserialize() for G2 on BN or G1/G2 on BLS12 + @param doVerify [in] does not verify if zero(default 1) +*/ +MCLBN_DLL_API void mclBn_verifyOrderG1(int doVerify); +MCLBN_DLL_API void mclBn_verifyOrderG2(int doVerify); + +#ifdef __cplusplus +} +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.hpp new file mode 100644 index 000000000..8e9a9c652 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn.hpp @@ -0,0 +1,2194 @@ +#pragma once +/** + @file + @brief optimal ate pairing over BN-curve / BLS12-curve + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <mcl/fp_tower.hpp> +#include <mcl/ec.hpp> +#include <mcl/curve_type.h> +#include <assert.h> +#ifndef CYBOZU_DONT_USE_EXCEPTION +#include <vector> +#endif + +/* + set bit size of Fp and Fr +*/ +#ifndef MCL_MAX_FP_BIT_SIZE + #define MCL_MAX_FP_BIT_SIZE 256 +#endif + +#ifndef MCL_MAX_FR_BIT_SIZE + #define MCL_MAX_FR_BIT_SIZE MCL_MAX_FP_BIT_SIZE +#endif +namespace mcl { + +struct CurveParam { + /* + y^2 = x^3 + b + i^2 = -1 + xi = xi_a + i + v^3 = xi + w^2 = v + */ + const char *z; + int b; // y^2 = x^3 + b + int xi_a; // xi = xi_a + i + /* + BN254, BN381 : Dtype + BLS12-381 : Mtype + */ + bool isMtype; + int curveType; // same in curve_type.h + bool operator==(const CurveParam& rhs) const + { + return strcmp(z, rhs.z) == 0 && b == rhs.b && xi_a == rhs.xi_a && isMtype == rhs.isMtype; + } + bool operator!=(const CurveParam& rhs) const { return !operator==(rhs); } +}; + +const CurveParam BN254 = { "-0x4080000000000001", 2, 1, false, MCL_BN254 }; // -(2^62 + 2^55 + 1) +// provisional(experimental) param with maxBitSize = 384 +const CurveParam BN381_1 = { "-0x400011000000000000000001", 2, 1, false, MCL_BN381_1 }; // -(2^94 + 2^76 + 2^72 + 1) // A Family of Implementation-Friendly BN Elliptic Curves +const CurveParam BN381_2 = { "-0x400040090001000000000001", 2, 1, false, MCL_BN381_2 }; // -(2^94 + 2^78 + 2^67 + 2^64 + 2^48 + 1) // used in relic-toolkit +const CurveParam BN462 = { "0x4001fffffffffffffffffffffbfff", 5, 2, false, MCL_BN462 }; // 2^114 + 2^101 - 2^14 - 1 // https://eprint.iacr.org/2017/334 +const CurveParam BN_SNARK1 = { "4965661367192848881", 3, 9, false, MCL_BN_SNARK1 }; +const CurveParam BLS12_381 = { "-0xd201000000010000", 4, 1, true, MCL_BLS12_381 }; +const CurveParam BN160 = { "0x4000000031", 3, 4, false, MCL_BN160 }; + +inline const CurveParam& getCurveParam(int type) +{ + switch (type) { + case MCL_BN254: return mcl::BN254; + case MCL_BN381_1: return mcl::BN381_1; + case MCL_BN381_2: return mcl::BN381_2; + case MCL_BN462: return mcl::BN462; + case MCL_BN_SNARK1: return mcl::BN_SNARK1; + case MCL_BLS12_381: return mcl::BLS12_381; + case MCL_BN160: return mcl::BN160; + default: + assert(0); + return mcl::BN254; + } +} + +namespace bn { + +namespace local { +struct FpTag; +struct FrTag; +} + +typedef mcl::FpT<local::FpTag, MCL_MAX_FP_BIT_SIZE> Fp; +typedef 
mcl::FpT<local::FrTag, MCL_MAX_FR_BIT_SIZE> Fr; +typedef mcl::Fp2T<Fp> Fp2; +typedef mcl::Fp6T<Fp> Fp6; +typedef mcl::Fp12T<Fp> Fp12; +typedef mcl::EcT<Fp> G1; +typedef mcl::EcT<Fp2> G2; +typedef Fp12 GT; + +typedef mcl::FpDblT<Fp> FpDbl; +typedef mcl::Fp2DblT<Fp> Fp2Dbl; + +inline void Frobenius(Fp2& y, const Fp2& x) +{ + Fp2::Frobenius(y, x); +} +inline void Frobenius(Fp12& y, const Fp12& x) +{ + Fp12::Frobenius(y, x); +} +/* + twisted Frobenius for G2 +*/ +void Frobenius(G2& D, const G2& S); +void Frobenius2(G2& D, const G2& S); +void Frobenius3(G2& D, const G2& S); + +namespace local { + +typedef mcl::FixedArray<int8_t, 128> SignVec; + +inline size_t getPrecomputeQcoeffSize(const SignVec& sv) +{ + size_t idx = 2 + 2; + for (size_t i = 2; i < sv.size(); i++) { + idx++; + if (sv[i]) idx++; + } + return idx; +} + +template<class X, class C, size_t N> +X evalPoly(const X& x, const C (&c)[N]) +{ + X ret = c[N - 1]; + for (size_t i = 1; i < N; i++) { + ret *= x; + ret += c[N - 1 - i]; + } + return ret; +} + +enum TwistBtype { + tb_generic, + tb_1m1i, // 1 - 1i + tb_1m2i // 1 - 2i +}; + +/* + l = (a, b, c) => (a, b * P.y, c * P.x) +*/ +inline void updateLine(Fp6& l, const G1& P) +{ + l.b.a *= P.y; + l.b.b *= P.y; + l.c.a *= P.x; + l.c.b *= P.x; +} + +struct Compress { + Fp12& z_; + Fp2& g1_; + Fp2& g2_; + Fp2& g3_; + Fp2& g4_; + Fp2& g5_; + // z is output area + Compress(Fp12& z, const Fp12& x) + : z_(z) + , g1_(z.getFp2()[4]) + , g2_(z.getFp2()[3]) + , g3_(z.getFp2()[2]) + , g4_(z.getFp2()[1]) + , g5_(z.getFp2()[5]) + { + g2_ = x.getFp2()[3]; + g3_ = x.getFp2()[2]; + g4_ = x.getFp2()[1]; + g5_ = x.getFp2()[5]; + } + Compress(Fp12& z, const Compress& c) + : z_(z) + , g1_(z.getFp2()[4]) + , g2_(z.getFp2()[3]) + , g3_(z.getFp2()[2]) + , g4_(z.getFp2()[1]) + , g5_(z.getFp2()[5]) + { + g2_ = c.g2_; + g3_ = c.g3_; + g4_ = c.g4_; + g5_ = c.g5_; + } + void decompressBeforeInv(Fp2& nume, Fp2& denomi) const + { + assert(&nume != &denomi); + + if (g2_.isZero()) { + Fp2::add(nume, g4_, g4_); + nume *= g5_; + denomi = g3_; + } else { + Fp2 t; + Fp2::sqr(nume, g5_); + Fp2::mul_xi(denomi, nume); + Fp2::sqr(nume, g4_); + Fp2::sub(t, nume, g3_); + t += t; + t += nume; + Fp2::add(nume, denomi, t); + Fp2::divBy4(nume, nume); + denomi = g2_; + } + } + + // output to z + void decompressAfterInv() + { + Fp2& g0 = z_.getFp2()[0]; + Fp2 t0, t1; + // Compute g0. + Fp2::sqr(t0, g1_); + Fp2::mul(t1, g3_, g4_); + t0 -= t1; + t0 += t0; + t0 -= t1; + Fp2::mul(t1, g2_, g5_); + t0 += t1; + Fp2::mul_xi(g0, t0); + g0.a += Fp::one(); + } + +public: + void decompress() // for test + { + Fp2 nume, denomi; + decompressBeforeInv(nume, denomi); + Fp2::inv(denomi, denomi); + g1_ = nume * denomi; // g1 is recoverd. 
+ decompressAfterInv(); + } + /* + 2275clk * 186 = 423Kclk QQQ + */ + static void squareC(Compress& z) + { + Fp2 t0, t1, t2; + Fp2Dbl T0, T1, T2, T3; + Fp2Dbl::sqrPre(T0, z.g4_); + Fp2Dbl::sqrPre(T1, z.g5_); + Fp2Dbl::mul_xi(T2, T1); + T2 += T0; + Fp2Dbl::mod(t2, T2); + Fp2::add(t0, z.g4_, z.g5_); + Fp2Dbl::sqrPre(T2, t0); + T0 += T1; + T2 -= T0; + Fp2Dbl::mod(t0, T2); + Fp2::add(t1, z.g2_, z.g3_); + Fp2Dbl::sqrPre(T3, t1); + Fp2Dbl::sqrPre(T2, z.g2_); + Fp2::mul_xi(t1, t0); + z.g2_ += t1; + z.g2_ += z.g2_; + z.g2_ += t1; + Fp2::sub(t1, t2, z.g3_); + t1 += t1; + Fp2Dbl::sqrPre(T1, z.g3_); + Fp2::add(z.g3_, t1, t2); + Fp2Dbl::mul_xi(T0, T1); + T0 += T2; + Fp2Dbl::mod(t0, T0); + Fp2::sub(z.g4_, t0, z.g4_); + z.g4_ += z.g4_; + z.g4_ += t0; + Fp2Dbl::addPre(T2, T2, T1); + T3 -= T2; + Fp2Dbl::mod(t0, T3); + z.g5_ += t0; + z.g5_ += z.g5_; + z.g5_ += t0; + } + static void square_n(Compress& z, int n) + { + for (int i = 0; i < n; i++) { + squareC(z); + } + } + /* + Exponentiation over compression for: + z = x^Param::z.abs() + */ + static void fixed_power(Fp12& z, const Fp12& x) + { + if (x.isOne()) { + z = 1; + return; + } + Fp12 x_org = x; + Fp12 d62; + Fp2 c55nume, c55denomi, c62nume, c62denomi; + Compress c55(z, x); + square_n(c55, 55); + c55.decompressBeforeInv(c55nume, c55denomi); + Compress c62(d62, c55); + square_n(c62, 62 - 55); + c62.decompressBeforeInv(c62nume, c62denomi); + Fp2 acc; + Fp2::mul(acc, c55denomi, c62denomi); + Fp2::inv(acc, acc); + Fp2 t; + Fp2::mul(t, acc, c62denomi); + Fp2::mul(c55.g1_, c55nume, t); + c55.decompressAfterInv(); + Fp2::mul(t, acc, c55denomi); + Fp2::mul(c62.g1_, c62nume, t); + c62.decompressAfterInv(); + z *= x_org; + z *= d62; + } +}; + +struct MapTo { + Fp c1_; // sqrt(-3) + Fp c2_; // (-1 + sqrt(-3)) / 2 + mpz_class z_; + mpz_class cofactor_; + bool isBN_; + int legendre(bool *pb, const Fp& x) const + { + mpz_class xx; + x.getMpz(pb, xx); + if (!*pb) return 0; + return gmp::legendre(xx, Fp::getOp().mp); + } + int legendre(bool *pb, const Fp2& x) const + { + Fp y; + Fp2::norm(y, x); + return legendre(pb, y); + } + void mulFp(Fp& x, const Fp& y) const + { + x *= y; + } + void mulFp(Fp2& x, const Fp& y) const + { + x.a *= y; + x.b *= y; + } + /* + P.-A. Fouque and M. Tibouchi, + "Indifferentiable hashing to Barreto Naehrig curves," + in Proc. Int. Conf. Cryptol. Inform. Security Latin Amer., 2012, vol. 7533, pp.1-17. 
+ + w = sqrt(-3) t / (1 + b + t^2) + Remark: throw exception if t = 0, c1, -c1 and b = 2 + */ + template<class G, class F> + bool calcBN(G& P, const F& t) const + { + F x, y, w; + bool b; + bool negative = legendre(&b, t) < 0; + if (!b) return false; + if (t.isZero()) return false; + F::sqr(w, t); + w += G::b_; + *w.getFp0() += Fp::one(); + if (w.isZero()) return false; + F::inv(w, w); + mulFp(w, c1_); + w *= t; + for (int i = 0; i < 3; i++) { + switch (i) { + case 0: F::mul(x, t, w); F::neg(x, x); *x.getFp0() += c2_; break; + case 1: F::neg(x, x); *x.getFp0() -= Fp::one(); break; + case 2: F::sqr(x, w); F::inv(x, x); *x.getFp0() += Fp::one(); break; + } + G::getWeierstrass(y, x); + if (F::squareRoot(y, y)) { + if (negative) F::neg(y, y); + P.set(&b, x, y, false); + assert(b); + return true; + } + } + return false; + } + /* + Faster Hashing to G2 + Laura Fuentes-Castaneda, Edward Knapp, Francisco Rodriguez-Henriquez + section 6.1 + for BN + Q = zP + Frob(3zP) + Frob^2(zP) + Frob^3(P) + = -(18x^3 + 12x^2 + 3x + 1)cofactor_ P + */ + void mulByCofactorBN(G2& Q, const G2& P) const + { +#if 0 + G2::mulGeneric(Q, P, cofactor_); +#else +#if 0 + mpz_class t = -(1 + z_ * (3 + z_ * (12 + z_ * 18))); + G2::mulGeneric(Q, P, t * cofactor_); +#else + G2 T0, T1, T2; + /* + G2::mul (GLV method) can't be used because P is not on G2 + */ + G2::mulGeneric(T0, P, z_); + G2::dbl(T1, T0); + T1 += T0; // 3zP + Frobenius(T1, T1); + Frobenius2(T2, T0); + T0 += T1; + T0 += T2; + Frobenius3(T2, P); + G2::add(Q, T0, T2); +#endif +#endif + } + /* + 1.2~1.4 times faster than calBN + */ + template<class G, class F> + void naiveMapTo(G& P, const F& t) const + { + F x = t; + for (;;) { + F y; + G::getWeierstrass(y, x); + if (F::squareRoot(y, y)) { + bool b; + P.set(&b, x, y, false); + assert(b); + return; + } + *x.getFp0() += Fp::one(); + } + } + /* + #(Fp) / r = (z + 1 - t) / r = (z - 1)^2 / 3 + */ + void mulByCofactorBLS12(G1& Q, const G1& P) const + { + G1::mulGeneric(Q, P, cofactor_); + } + /* + Efficient hash maps to G2 on BLS curves + Alessandro Budroni, Federico Pintore + Q = (z(z-1)-1)P + Frob((z-1)P) + Frob^2(2P) + */ + void mulByCofactorBLS12(G2& Q, const G2& P) const + { + G2 T0, T1; + G2::mulGeneric(T0, P, z_ - 1); + G2::mulGeneric(T1, T0, z_); + T1 -= P; + Frobenius(T0, T0); + T0 += T1; + G2::dbl(T1, P); + Frobenius2(T1, T1); + G2::add(Q, T0, T1); + } + /* + cofactor_ is for G2(not used now) + */ + void initBN(const mpz_class& cofactor, const mpz_class &z, int curveType) + { + z_ = z; + cofactor_ = cofactor; + if (curveType == MCL_BN254) { + const char *c1 = "252364824000000126cd890000000003cf0f0000000000060c00000000000004"; + const char *c2 = "25236482400000017080eb4000000006181800000000000cd98000000000000b"; + bool b; + c1_.setStr(&b, c1, 16); + c2_.setStr(&b, c2, 16); + (void)b; + return; + } + bool b = Fp::squareRoot(c1_, -3); + assert(b); + (void)b; + c2_ = (c1_ - 1) / 2; + } + void initBLS12(const mpz_class& z) + { + z_ = z; + // cofactor for G1 + cofactor_ = (z - 1) * (z - 1) / 3; + bool b = Fp::squareRoot(c1_, -3); + assert(b); + (void)b; + c2_ = (c1_ - 1) / 2; + } + void init(const mpz_class& cofactor, const mpz_class &z, bool isBN, int curveType = -1) + { + isBN_ = isBN; + if (isBN_) { + initBN(cofactor, z, curveType); + } else { + initBLS12(z); + } + } + bool calcG1(G1& P, const Fp& t) const + { + if (isBN_) { + if (!calcBN<G1, Fp>(P, t)) return false; + // no subgroup + } else { +#ifdef MCL_USE_OLD_MAPTO_FOR_BLS12 + naiveMapTo<G1, Fp>(P, t); +#else + if (!calcBN<G1, Fp>(P, t)) return false; 
+#endif + mulByCofactorBLS12(P, P); + } + assert(P.isValid()); + return true; + } + /* + get the element in G2 by multiplying the cofactor + */ + bool calcG2(G2& P, const Fp2& t) const + { + if (isBN_) { + if (!calcBN<G2, Fp2>(P, t)) return false; + mulByCofactorBN(P, P); + } else { +#ifdef MCL_USE_OLD_MAPTO_FOR_BLS12 + naiveMapTo<G1, Fp>(P, t); +#else + if (!calcBN<G2, Fp2>(P, t)) return false; +#endif + mulByCofactorBLS12(P, P); + } + assert(P.isValid()); + return true; + } +}; + +/* + Software implementation of Attribute-Based Encryption: Appendixes + GLV for G1 on BN/BLS12 +*/ +struct GLV1 { + Fp rw; // rw = 1 / w = (-1 - sqrt(-3)) / 2 + size_t rBitSize; + mpz_class v0, v1; + mpz_class B[2][2]; + mpz_class r; +private: + bool usePrecomputedTable(int curveType) + { + if (curveType < 0) return false; + const struct Tbl { + int curveType; + const char *rw; + size_t rBitSize; + const char *v0, *v1; + const char *B[2][2]; + const char *r; + } tbl[] = { + { + MCL_BN254, + "49b36240000000024909000000000006cd80000000000007", + 256, + "2a01fab7e04a017b9c0eb31ff36bf3357", + "37937ca688a6b4904", + { + { + "61818000000000028500000000000004", + "8100000000000001", + }, + { + "8100000000000001", + "-61818000000000020400000000000003", + }, + }, + "2523648240000001ba344d8000000007ff9f800000000010a10000000000000d", + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + if (tbl[i].curveType != curveType) continue; + bool b; + rw.setStr(&b, tbl[i].rw, 16); if (!b) continue; + rBitSize = tbl[i].rBitSize; + mcl::gmp::setStr(&b, v0, tbl[i].v0, 16); if (!b) continue; + mcl::gmp::setStr(&b, v1, tbl[i].v1, 16); if (!b) continue; + mcl::gmp::setStr(&b, B[0][0], tbl[i].B[0][0], 16); if (!b) continue; + mcl::gmp::setStr(&b, B[0][1], tbl[i].B[0][1], 16); if (!b) continue; + mcl::gmp::setStr(&b, B[1][0], tbl[i].B[1][0], 16); if (!b) continue; + mcl::gmp::setStr(&b, B[1][1], tbl[i].B[1][1], 16); if (!b) continue; + mcl::gmp::setStr(&b, r, tbl[i].r, 16); if (!b) continue; + return true; + } + return false; + } +public: + bool operator==(const GLV1& rhs) const + { + return rw == rhs.rw && rBitSize == rhs.rBitSize && v0 == rhs.v0 && v1 == rhs.v1 + && B[0][0] == rhs.B[0][0] && B[0][1] == rhs.B[0][1] && B[1][0] == rhs.B[1][0] + && B[1][1] == rhs.B[1][1] && r == rhs.r; + } + bool operator!=(const GLV1& rhs) const { return !operator==(rhs); } +#ifndef CYBOZU_DONT_USE_STRING + void dump(const mpz_class& x) const + { + printf("\"%s\",\n", mcl::gmp::getStr(x, 16).c_str()); + } + void dump() const + { + printf("\"%s\",\n", rw.getStr(16).c_str()); + printf("%d,\n", (int)rBitSize); + dump(v0); + dump(v1); + dump(B[0][0]); dump(B[0][1]); dump(B[1][0]); dump(B[1][1]); + dump(r); + } +#endif + void init(const mpz_class& r, const mpz_class& z, bool isBLS12 = false, int curveType = -1) + { + if (usePrecomputedTable(curveType)) return; + bool b = Fp::squareRoot(rw, -3); + assert(b); + (void)b; + rw = -(rw + 1) / 2; + this->r = r; + rBitSize = gmp::getBitSize(r); + rBitSize = (rBitSize + fp::UnitBitSize - 1) & ~(fp::UnitBitSize - 1);// a little better size + if (isBLS12) { + /* + BLS12 + L = z^4 + (-z^2+1) + L = 0 + 1 + z^2 L = 0 + */ + B[0][0] = -z * z + 1; + B[0][1] = 1; + B[1][0] = 1; + B[1][1] = z * z; + } else { + /* + BN + L = 36z^4 - 1 + (6z^2+2z) - (2z+1) L = 0 + (-2z-1) - (6z^2+4z+1)L = 0 + */ + B[0][0] = 6 * z * z + 2 * z; + B[0][1] = -2 * z - 1; + B[1][0] = -2 * z - 1; + B[1][1] = -6 * z * z - 4 * z - 1; + } + // [v0 v1] = [r 0] * B^(-1) + v0 = ((-B[1][1]) << rBitSize) / r; + v1 = ((B[1][0]) << rBitSize) / r; 
+ } + /* + L = lambda = p^4 + L (x, y) = (rw x, y) + */ + void mulLambda(G1& Q, const G1& P) const + { + Fp::mul(Q.x, P.x, rw); + Q.y = P.y; + Q.z = P.z; + } + /* + x = a + b * lambda mod r + */ + void split(mpz_class& a, mpz_class& b, const mpz_class& x) const + { + mpz_class t; + t = (x * v0) >> rBitSize; + b = (x * v1) >> rBitSize; + a = x - (t * B[0][0] + b * B[1][0]); + b = - (t * B[0][1] + b * B[1][1]); + } + void mul(G1& Q, const G1& P, mpz_class x, bool constTime = false) const + { + typedef mcl::fp::Unit Unit; + const size_t maxUnit = 512 / 2 / mcl::fp::UnitBitSize; + const int splitN = 2; + mpz_class u[splitN]; + G1 in[splitN]; + G1 tbl[4]; + int bitTbl[splitN]; // bit size of u[i] + Unit w[splitN][maxUnit]; // unit array of u[i] + int maxBit = 0; // max bit of u[i] + int maxN = 0; + int remainBit = 0; + + x %= r; + if (x == 0) { + Q.clear(); + if (constTime) goto DummyLoop; + return; + } + if (x < 0) { + x += r; + } + split(u[0], u[1], x); + in[0] = P; + mulLambda(in[1], in[0]); + for (int i = 0; i < splitN; i++) { + if (u[i] < 0) { + u[i] = -u[i]; + G1::neg(in[i], in[i]); + } + in[i].normalize(); + } +#if 0 + G1::mulGeneric(in[0], in[0], u[0]); + G1::mulGeneric(in[1], in[1], u[1]); + G1::add(Q, in[0], in[1]); + return; +#else + tbl[0] = in[0]; // dummy + tbl[1] = in[0]; + tbl[2] = in[1]; + G1::add(tbl[3], in[0], in[1]); + tbl[3].normalize(); + for (int i = 0; i < splitN; i++) { + bool b; + mcl::gmp::getArray(&b, w[i], maxUnit, u[i]); + assert(b); + bitTbl[i] = (int)mcl::gmp::getBitSize(u[i]); + maxBit = fp::max_(maxBit, bitTbl[i]); + } + assert(maxBit > 0); + maxBit--; + /* + maxBit = maxN * UnitBitSize + remainBit + 0 < remainBit <= UnitBitSize + */ + maxN = maxBit / mcl::fp::UnitBitSize; + remainBit = maxBit % mcl::fp::UnitBitSize; + remainBit++; + Q.clear(); + for (int i = maxN; i >= 0; i--) { + for (int j = remainBit - 1; j >= 0; j--) { + G1::dbl(Q, Q); + uint32_t b0 = (w[0][i] >> j) & 1; + uint32_t b1 = (w[1][i] >> j) & 1; + uint32_t c = b1 * 2 + b0; + if (c == 0) { + if (constTime) tbl[0] += tbl[1]; + } else { + Q += tbl[c]; + } + } + remainBit = (int)mcl::fp::UnitBitSize; + } +#endif + DummyLoop: + if (!constTime) return; + const int limitBit = (int)rBitSize / splitN; + G1 D = tbl[0]; + for (int i = maxBit + 1; i < limitBit; i++) { + G1::dbl(D, D); + D += tbl[0]; + } + } +}; + +/* + GLV method for G2 and GT on BN/BLS12 +*/ +struct GLV2 { + size_t rBitSize; + mpz_class B[4][4]; + mpz_class r; + mpz_class v[4]; + mpz_class z; + mpz_class abs_z; + bool isBLS12; + GLV2() : rBitSize(0), isBLS12(false) {} + void init(const mpz_class& r, const mpz_class& z, bool isBLS12 = false) + { + this->r = r; + this->z = z; + this->abs_z = z < 0 ? 
-z : z; + this->isBLS12 = isBLS12; + rBitSize = mcl::gmp::getBitSize(r); + rBitSize = (rBitSize + mcl::fp::UnitBitSize - 1) & ~(mcl::fp::UnitBitSize - 1);// a little better size + mpz_class z2p1 = z * 2 + 1; + B[0][0] = z + 1; + B[0][1] = z; + B[0][2] = z; + B[0][3] = -2 * z; + B[1][0] = z2p1; + B[1][1] = -z; + B[1][2] = -(z + 1); + B[1][3] = -z; + B[2][0] = 2 * z; + B[2][1] = z2p1; + B[2][2] = z2p1; + B[2][3] = z2p1; + B[3][0] = z - 1; + B[3][1] = 2 * z2p1; + B[3][2] = -2 * z + 1; + B[3][3] = z - 1; + /* + v[] = [r 0 0 0] * B^(-1) = [2z^2+3z+1, 12z^3+8z^2+z, 6z^3+4z^2+z, -(2z+1)] + */ + const char *zBN254 = "-4080000000000001"; + mpz_class t; + bool b; + mcl::gmp::setStr(&b, t, zBN254, 16); + assert(b); + (void)b; + if (z == t) { + static const char *vTblBN254[] = { + "e00a8e7f56e007e5b09fe7fdf43ba998", + "-152aff56a8054abf9da75db2da3d6885101e5fd3997d41cb1", + "-a957fab5402a55fced3aed96d1eb44295f40f136ee84e09b", + "-e00a8e7f56e007e929d7b2667ea6f29c", + }; + for (int i = 0; i < 4; i++) { + mcl::gmp::setStr(&b, v[i], vTblBN254[i], 16); + assert(b); + (void)b; + } + } else { + v[0] = ((1 + z * (3 + z * 2)) << rBitSize) / r; + v[1] = ((z * (1 + z * (8 + z * 12))) << rBitSize) / r; + v[2] = ((z * (1 + z * (4 + z * 6))) << rBitSize) / r; + v[3] = -((z * (1 + z * 2)) << rBitSize) / r; + } + } + /* + u[] = [x, 0, 0, 0] - v[] * x * B + */ + void split(mpz_class u[4], const mpz_class& x) const + { + if (isBLS12) { + /* + Frob(P) = zP + x = u[0] + u[1] z + u[2] z^2 + u[3] z^3 + */ + bool isNeg = false; + mpz_class t = x; + if (t < 0) { + t = -t; + isNeg = true; + } + for (int i = 0; i < 4; i++) { + // t = t / abs_z, u[i] = t % abs_z + mcl::gmp::divmod(t, u[i], t, abs_z); + if (((z < 0) && (i & 1)) ^ isNeg) { + u[i] = -u[i]; + } + } + return; + } + // BN + mpz_class t[4]; + for (int i = 0; i < 4; i++) { + t[i] = (x * v[i]) >> rBitSize; + } + for (int i = 0; i < 4; i++) { + u[i] = (i == 0) ? 
x : 0; + for (int j = 0; j < 4; j++) { + u[i] -= t[j] * B[j][i]; + } + } + } + template<class T> + void mul(T& Q, const T& P, mpz_class x, bool constTime = false) const + { +#if 0 // #ifndef NDEBUG + { + T R; + T::mulGeneric(R, P, r); + assert(R.isZero()); + } +#endif + typedef mcl::fp::Unit Unit; + const size_t maxUnit = 512 / 2 / mcl::fp::UnitBitSize; + const int splitN = 4; + mpz_class u[splitN]; + T in[splitN]; + T tbl[16]; + int bitTbl[splitN]; // bit size of u[i] + Unit w[splitN][maxUnit]; // unit array of u[i] + int maxBit = 0; // max bit of u[i] + int maxN = 0; + int remainBit = 0; + + x %= r; + if (x == 0) { + Q.clear(); + if (constTime) goto DummyLoop; + return; + } + if (x < 0) { + x += r; + } + split(u, x); + in[0] = P; + Frobenius(in[1], in[0]); + Frobenius(in[2], in[1]); + Frobenius(in[3], in[2]); + for (int i = 0; i < splitN; i++) { + if (u[i] < 0) { + u[i] = -u[i]; + T::neg(in[i], in[i]); + } +// in[i].normalize(); // slow + } +#if 0 + for (int i = 0; i < splitN; i++) { + T::mulGeneric(in[i], in[i], u[i]); + } + T::add(Q, in[0], in[1]); + Q += in[2]; + Q += in[3]; + return; +#else + tbl[0] = in[0]; + for (size_t i = 1; i < 16; i++) { + tbl[i].clear(); + if (i & 1) { + tbl[i] += in[0]; + } + if (i & 2) { + tbl[i] += in[1]; + } + if (i & 4) { + tbl[i] += in[2]; + } + if (i & 8) { + tbl[i] += in[3]; + } +// tbl[i].normalize(); + } + for (int i = 0; i < splitN; i++) { + bool b; + mcl::gmp::getArray(&b, w[i], maxUnit, u[i]); + assert(b); + bitTbl[i] = (int)mcl::gmp::getBitSize(u[i]); + maxBit = fp::max_(maxBit, bitTbl[i]); + } + maxBit--; + /* + maxBit = maxN * UnitBitSize + remainBit + 0 < remainBit <= UnitBitSize + */ + maxN = maxBit / mcl::fp::UnitBitSize; + remainBit = maxBit % mcl::fp::UnitBitSize; + remainBit++; + Q.clear(); + for (int i = maxN; i >= 0; i--) { + for (int j = remainBit - 1; j >= 0; j--) { + T::dbl(Q, Q); + uint32_t b0 = (w[0][i] >> j) & 1; + uint32_t b1 = (w[1][i] >> j) & 1; + uint32_t b2 = (w[2][i] >> j) & 1; + uint32_t b3 = (w[3][i] >> j) & 1; + uint32_t c = b3 * 8 + b2 * 4 + b1 * 2 + b0; + if (c == 0) { + if (constTime) tbl[0] += tbl[1]; + } else { + Q += tbl[c]; + } + } + remainBit = (int)mcl::fp::UnitBitSize; + } +#endif + DummyLoop: + if (!constTime) return; + const int limitBit = (int)rBitSize / splitN; + T D = tbl[0]; + for (int i = maxBit + 1; i < limitBit; i++) { + T::dbl(D, D); + D += tbl[0]; + } + } + void pow(Fp12& z, const Fp12& x, mpz_class y, bool constTime = false) const + { + typedef GroupMtoA<Fp12> AG; // as additive group + AG& _z = static_cast<AG&>(z); + const AG& _x = static_cast<const AG&>(x); + mul(_z, _x, y, constTime); + } +}; + +struct Param { + CurveParam cp; + mpz_class z; + mpz_class abs_z; + bool isNegative; + bool isBLS12; + mpz_class p; + mpz_class r; + local::MapTo mapTo; + local::GLV1 glv1; + local::GLV2 glv2; + // for G2 Frobenius + Fp2 g2; + Fp2 g3; + /* + Dtype twist + (x', y') = phi(x, y) = (x/w^2, y/w^3) + y^2 = x^3 + b + => (y'w^3)^2 = (x'w^2)^3 + b + => y'^2 = x'^3 + b / w^6 ; w^6 = xi + => y'^2 = x'^3 + twist_b; + */ + Fp2 twist_b; + local::TwistBtype twist_b_type; +/* + mpz_class exp_c0; + mpz_class exp_c1; + mpz_class exp_c2; + mpz_class exp_c3; +*/ + + // Loop parameter for the Miller loop part of opt. ate pairing. 
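+	// siTbl = NAF digits of |6z+2| (BN) or |z| (BLS12), set in init(); zReplTbl = NAF digits of |z|, consumed by pow_z().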
+ local::SignVec siTbl; + size_t precomputedQcoeffSize; + bool useNAF; + local::SignVec zReplTbl; + + void init(bool *pb, const mcl::CurveParam& cp, fp::Mode mode) + { + this->cp = cp; + isBLS12 = cp.curveType == MCL_BLS12_381; + gmp::setStr(pb, z, cp.z); + if (!*pb) return; + isNegative = z < 0; + if (isNegative) { + abs_z = -z; + } else { + abs_z = z; + } + if (isBLS12) { + mpz_class z2 = z * z; + mpz_class z4 = z2 * z2; + r = z4 - z2 + 1; + p = z - 1; + p = p * p * r / 3 + z; + } else { + const int pCoff[] = { 1, 6, 24, 36, 36 }; + const int rCoff[] = { 1, 6, 18, 36, 36 }; + p = local::evalPoly(z, pCoff); + assert((p % 6) == 1); + r = local::evalPoly(z, rCoff); + } + Fp::init(pb, p, mode); + if (!*pb) return; + Fr::init(pb, r, mode); + if (!*pb) return; + Fp2::init(cp.xi_a); + Fp2 xi(cp.xi_a, 1); + g2 = Fp2::get_gTbl()[0]; + g3 = Fp2::get_gTbl()[3]; + if (cp.isMtype) { + Fp2::inv(g2, g2); + Fp2::inv(g3, g3); + } + if (cp.isMtype) { + twist_b = Fp2(cp.b) * xi; + } else { + if (cp.b == 2 && cp.xi_a == 1) { + twist_b = Fp2(1, -1); // shortcut + } else { + twist_b = Fp2(cp.b) / xi; + } + } + if (twist_b == Fp2(1, -1)) { + twist_b_type = tb_1m1i; + } else if (twist_b == Fp2(1, -2)) { + twist_b_type = tb_1m2i; + } else { + twist_b_type = tb_generic; + } + G1::init(0, cp.b, mcl::ec::Proj); + if (isBLS12) { + G1::setOrder(r); + } + G2::init(0, twist_b, mcl::ec::Proj); + G2::setOrder(r); + + const mpz_class largest_c = isBLS12 ? abs_z : gmp::abs(z * 6 + 2); + useNAF = gmp::getNAF(siTbl, largest_c); + precomputedQcoeffSize = local::getPrecomputeQcoeffSize(siTbl); + gmp::getNAF(zReplTbl, gmp::abs(z)); +/* + if (isBLS12) { + mpz_class z2 = z * z; + mpz_class z3 = z2 * z; + mpz_class z4 = z3 * z; + mpz_class z5 = z4 * z; + exp_c0 = z5 - 2 * z4 + 2 * z2 - z + 3; + exp_c1 = z4 - 2 * z3 + 2 * z - 1; + exp_c2 = z3 - 2 * z2 + z; + exp_c3 = z2 - 2 * z + 1; + } else { + exp_c0 = -2 + z * (-18 + z * (-30 - 36 * z)); + exp_c1 = 1 + z * (-12 + z * (-18 - 36 * z)); + exp_c2 = 6 * z * z + 1; + } +*/ + if (isBLS12) { + mapTo.init(0, z, false); + } else { + mapTo.init(2 * p - r, z, true, cp.curveType); + } + glv1.init(r, z, isBLS12, cp.curveType); + glv2.init(r, z, isBLS12); + *pb = true; + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void init(const mcl::CurveParam& cp, fp::Mode mode) + { + bool b; + init(&b, cp, mode); + if (!b) throw cybozu::Exception("Param:init"); + } +#endif +}; + +template<size_t dummyImpl = 0> +struct StaticVar { + static local::Param param; +}; + +template<size_t dummyImpl> +local::Param StaticVar<dummyImpl>::param; + +} // mcl::bn::local + +namespace BN { + +static const local::Param& param = local::StaticVar<>::param; + +} // mcl::bn::BN + +namespace local { + +inline void mulArrayGLV1(G1& z, const G1& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime) +{ + mpz_class s; + bool b; + mcl::gmp::setArray(&b, s, y, yn); + assert(b); + if (isNegative) s = -s; + BN::param.glv1.mul(z, x, s, constTime); +} +inline void mulArrayGLV2(G2& z, const G2& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime) +{ + mpz_class s; + bool b; + mcl::gmp::setArray(&b, s, y, yn); + assert(b); + if (isNegative) s = -s; + BN::param.glv2.mul(z, x, s, constTime); +} +inline void powArrayGLV2(Fp12& z, const Fp12& x, const mcl::fp::Unit *y, size_t yn, bool isNegative, bool constTime) +{ + mpz_class s; + bool b; + mcl::gmp::setArray(&b, s, y, yn); + assert(b); + if (isNegative) s = -s; + BN::param.glv2.pow(z, x, s, constTime); +} + +/* + Faster Squaring in the Cyclotomic Subgroup of 
Sixth Degree Extensions + Robert Granger, Michael Scott +*/ +inline void sqrFp4(Fp2& z0, Fp2& z1, const Fp2& x0, const Fp2& x1) +{ +#if 1 + Fp2Dbl T0, T1, T2; + Fp2Dbl::sqrPre(T0, x0); + Fp2Dbl::sqrPre(T1, x1); + Fp2Dbl::mul_xi(T2, T1); + Fp2Dbl::add(T2, T2, T0); + Fp2::add(z1, x0, x1); + Fp2Dbl::mod(z0, T2); + Fp2Dbl::sqrPre(T2, z1); + Fp2Dbl::sub(T2, T2, T0); + Fp2Dbl::sub(T2, T2, T1); + Fp2Dbl::mod(z1, T2); +#else + Fp2 t0, t1, t2; + Fp2::sqr(t0, x0); + Fp2::sqr(t1, x1); + Fp2::mul_xi(z0, t1); + z0 += t0; + Fp2::add(z1, x0, x1); + Fp2::sqr(z1, z1); + z1 -= t0; + z1 -= t1; +#endif +} + +inline void fasterSqr(Fp12& y, const Fp12& x) +{ +#if 0 + Fp12::sqr(y, x); +#else + const Fp2& x0(x.a.a); + const Fp2& x4(x.a.b); + const Fp2& x3(x.a.c); + const Fp2& x2(x.b.a); + const Fp2& x1(x.b.b); + const Fp2& x5(x.b.c); + Fp2& y0(y.a.a); + Fp2& y4(y.a.b); + Fp2& y3(y.a.c); + Fp2& y2(y.b.a); + Fp2& y1(y.b.b); + Fp2& y5(y.b.c); + Fp2 t0, t1; + sqrFp4(t0, t1, x0, x1); + Fp2::sub(y0, t0, x0); + y0 += y0; + y0 += t0; + Fp2::add(y1, t1, x1); + y1 += y1; + y1 += t1; + Fp2 t2, t3; + sqrFp4(t0, t1, x2, x3); + sqrFp4(t2, t3, x4, x5); + Fp2::sub(y4, t0, x4); + y4 += y4; + y4 += t0; + Fp2::add(y5, t1, x5); + y5 += y5; + y5 += t1; + Fp2::mul_xi(t0, t3); + Fp2::add(y2, t0, x2); + y2 += y2; + y2 += t0; + Fp2::sub(y3, t2, x3); + y3 += y3; + y3 += t2; +#endif +} + +/* + y = x^z if z > 0 + = unitaryInv(x^(-z)) if z < 0 +*/ +inline void pow_z(Fp12& y, const Fp12& x) +{ +#if 1 + if (BN::param.cp.curveType == MCL_BN254) { + Compress::fixed_power(y, x); + } else { + Fp12 orgX = x; + y = x; + Fp12 conj; + conj.a = x.a; + Fp6::neg(conj.b, x.b); + for (size_t i = 1; i < BN::param.zReplTbl.size(); i++) { + fasterSqr(y, y); + if (BN::param.zReplTbl[i] > 0) { + y *= orgX; + } else if (BN::param.zReplTbl[i] < 0) { + y *= conj; + } + } + } +#else + Fp12::pow(y, x, param.abs_z); +#endif + if (BN::param.isNegative) { + Fp12::unitaryInv(y, y); + } +} +inline void mul_twist_b(Fp2& y, const Fp2& x) +{ + switch (BN::param.twist_b_type) { + case local::tb_1m1i: + /* + b / xi = 1 - 1i + (a + bi)(1 - 1i) = (a + b) + (b - a)i + */ + { + Fp t; + Fp::add(t, x.a, x.b); + Fp::sub(y.b, x.b, x.a); + y.a = t; + } + return; + case local::tb_1m2i: + /* + b / xi = 1 - 2i + (a + bi)(1 - 2i) = (a + 2b) + (b - 2a)i + */ + { + Fp t; + Fp::sub(t, x.b, x.a); + t -= x.a; + Fp::add(y.a, x.a, x.b); + y.a += x.b; + y.b = t; + } + return; + case local::tb_generic: + Fp2::mul(y, x, BN::param.twist_b); + return; + } +} + +inline void dblLineWithoutP(Fp6& l, G2& Q) +{ + Fp2 t0, t1, t2, t3, t4, t5; + Fp2Dbl T0, T1; + Fp2::sqr(t0, Q.z); + Fp2::mul(t4, Q.x, Q.y); + Fp2::sqr(t1, Q.y); + Fp2::add(t3, t0, t0); + Fp2::divBy2(t4, t4); + Fp2::add(t5, t0, t1); + t0 += t3; + mul_twist_b(t2, t0); + Fp2::sqr(t0, Q.x); + Fp2::add(t3, t2, t2); + t3 += t2; + Fp2::sub(Q.x, t1, t3); + t3 += t1; + Q.x *= t4; + Fp2::divBy2(t3, t3); + Fp2Dbl::sqrPre(T0, t3); + Fp2Dbl::sqrPre(T1, t2); + Fp2Dbl::sub(T0, T0, T1); + Fp2Dbl::add(T1, T1, T1); + Fp2Dbl::sub(T0, T0, T1); + Fp2::add(t3, Q.y, Q.z); + Fp2Dbl::mod(Q.y, T0); + Fp2::sqr(t3, t3); + t3 -= t5; + Fp2::mul(Q.z, t1, t3); + Fp2::sub(l.a, t2, t1); + l.c = t0; + l.b = t3; +} +inline void addLineWithoutP(Fp6& l, G2& R, const G2& Q) +{ + Fp2 t1, t2, t3, t4; + Fp2Dbl T1, T2; + Fp2::mul(t1, R.z, Q.x); + Fp2::mul(t2, R.z, Q.y); + Fp2::sub(t1, R.x, t1); + Fp2::sub(t2, R.y, t2); + Fp2::sqr(t3, t1); + Fp2::mul(R.x, t3, R.x); + Fp2::sqr(t4, t2); + t3 *= t1; + t4 *= R.z; + t4 += t3; + t4 -= R.x; + t4 -= R.x; + R.x -= t4; + Fp2Dbl::mulPre(T1, t2, 
R.x); + Fp2Dbl::mulPre(T2, t3, R.y); + Fp2Dbl::sub(T2, T1, T2); + Fp2Dbl::mod(R.y, T2); + Fp2::mul(R.x, t1, t4); + Fp2::mul(R.z, t3, R.z); + Fp2::neg(l.c, t2); + Fp2Dbl::mulPre(T1, t2, Q.x); + Fp2Dbl::mulPre(T2, t1, Q.y); + Fp2Dbl::sub(T1, T1, T2); + l.b = t1; + Fp2Dbl::mod(l.a, T1); +} +inline void dblLine(Fp6& l, G2& Q, const G1& P) +{ + dblLineWithoutP(l, Q); + local::updateLine(l, P); +} +inline void addLine(Fp6& l, G2& R, const G2& Q, const G1& P) +{ + addLineWithoutP(l, R, Q); + local::updateLine(l, P); +} +inline void mulFp6cb_by_G1xy(Fp6& y, const Fp6& x, const G1& P) +{ + assert(P.isNormalized()); + if (&y != &x) y.a = x.a; + Fp2::mulFp(y.c, x.c, P.x); + Fp2::mulFp(y.b, x.b, P.y); +} + +/* + x = a + bv + cv^2 + y = (y0, y4, y2) -> (y0, 0, y2, 0, y4, 0) + z = xy = (a + bv + cv^2)(d + ev) + = (ad + ce xi) + ((a + b)(d + e) - ad - be)v + (be + cd)v^2 +*/ +inline void Fp6mul_01(Fp6& z, const Fp6& x, const Fp2& d, const Fp2& e) +{ + const Fp2& a = x.a; + const Fp2& b = x.b; + const Fp2& c = x.c; + Fp2 t0, t1; + Fp2Dbl AD, CE, BE, CD, T; + Fp2Dbl::mulPre(AD, a, d); + Fp2Dbl::mulPre(CE, c, e); + Fp2Dbl::mulPre(BE, b, e); + Fp2Dbl::mulPre(CD, c, d); + Fp2::add(t0, a, b); + Fp2::add(t1, d, e); + Fp2Dbl::mulPre(T, t0, t1); + T -= AD; + T -= BE; + Fp2Dbl::mod(z.b, T); + Fp2Dbl::mul_xi(CE, CE); + AD += CE; + Fp2Dbl::mod(z.a, AD); + BE += CD; + Fp2Dbl::mod(z.c, BE); +} +/* + input + z = (z0 + z1v + z2v^2) + (z3 + z4v + z5v^2)w = Z0 + Z1w + 0 3 4 + x = (a, b, c) -> (b, 0, 0, c, a, 0) = X0 + X1w + X0 = b = (b, 0, 0) + X1 = c + av = (c, a, 0) + w^2 = v, v^3 = xi + output + z <- zx = (Z0X0 + Z1X1v) + ((Z0 + Z1)(X0 + X1) - Z0X0 - Z1X1)w + Z0X0 = Z0 b + Z1X1 = Z1 (c, a, 0) + (Z0 + Z1)(X0 + X1) = (Z0 + Z1) (b + c, a, 0) +*/ +inline void mul_403(Fp12& z, const Fp6& x) +{ + const Fp2& a = x.a; + const Fp2& b = x.b; + const Fp2& c = x.c; +#if 1 + Fp6& z0 = z.a; + Fp6& z1 = z.b; + Fp6 z0x0, z1x1, t0; + Fp2 t1; + Fp2::add(t1, x.b, c); + Fp6::add(t0, z0, z1); + Fp2::mul(z0x0.a, z0.a, b); + Fp2::mul(z0x0.b, z0.b, b); + Fp2::mul(z0x0.c, z0.c, b); + Fp6mul_01(z1x1, z1, c, a); + Fp6mul_01(t0, t0, t1, a); + Fp6::sub(z.b, t0, z0x0); + z.b -= z1x1; + // a + bv + cv^2 = cxi + av + bv^2 + Fp2::mul_xi(z1x1.c, z1x1.c); + Fp2::add(z.a.a, z0x0.a, z1x1.c); + Fp2::add(z.a.b, z0x0.b, z1x1.a); + Fp2::add(z.a.c, z0x0.c, z1x1.b); +#else + Fp2& z0 = z.a.a; + Fp2& z1 = z.a.b; + Fp2& z2 = z.a.c; + Fp2& z3 = z.b.a; + Fp2& z4 = z.b.b; + Fp2& z5 = z.b.c; + Fp2Dbl Z0B, Z1B, Z2B, Z3C, Z4C, Z5C; + Fp2Dbl T0, T1, T2, T3, T4, T5; + Fp2 bc, t; + Fp2::addPre(bc, b, c); + Fp2::addPre(t, z5, z2); + Fp2Dbl::mulPre(T5, t, bc); + Fp2Dbl::mulPre(Z5C, z5, c); + Fp2Dbl::mulPre(Z2B, z2, b); + Fp2Dbl::sub(T5, T5, Z5C); + Fp2Dbl::sub(T5, T5, Z2B); + Fp2Dbl::mulPre(T0, z1, a); + T5 += T0; + + Fp2::addPre(t, z4, z1); + Fp2Dbl::mulPre(T4, t, bc); + Fp2Dbl::mulPre(Z4C, z4, c); + Fp2Dbl::mulPre(Z1B, z1, b); + Fp2Dbl::sub(T4, T4, Z4C); + Fp2Dbl::sub(T4, T4, Z1B); + Fp2Dbl::mulPre(T0, z0, a); + T4 += T0; + + Fp2::addPre(t, z3, z0); + Fp2Dbl::mulPre(T3, t, bc); + Fp2Dbl::mulPre(Z3C, z3, c); + Fp2Dbl::mulPre(Z0B, z0, b); + Fp2Dbl::sub(T3, T3, Z3C); + Fp2Dbl::sub(T3, T3, Z0B); + Fp2::mul_xi(t, z2); + Fp2Dbl::mulPre(T0, t, a); + T3 += T0; + + Fp2Dbl::mulPre(T2, z3, a); + T2 += Z2B; + T2 += Z4C; + + Fp2::mul_xi(t, z5); + Fp2Dbl::mulPre(T1, t, a); + T1 += Z1B; + T1 += Z3C; + + Fp2Dbl::mulPre(T0, z4, a); + T0 += Z5C; + Fp2Dbl::mul_xi(T0, T0); + T0 += Z0B; + + Fp2Dbl::mod(z0, T0); + Fp2Dbl::mod(z1, T1); + Fp2Dbl::mod(z2, T2); + Fp2Dbl::mod(z3, T3); + 
Fp2Dbl::mod(z4, T4); + Fp2Dbl::mod(z5, T5); +#endif +} +/* + input + z = (z0 + z1v + z2v^2) + (z3 + z4v + z5v^2)w = Z0 + Z1w + 0 1 4 + x = (a, b, c) -> (a, c, 0, 0, b, 0) = X0 + X1w + X0 = (a, c, 0) + X1 = (0, b, 0) + w^2 = v, v^3 = xi + output + z <- zx = (Z0X0 + Z1X1v) + ((Z0 + Z1)(X0 + X1) - Z0X0 - Z1X1)w + Z0X0 = Z0 (a, c, 0) + Z1X1 = Z1 (0, b, 0) = Z1 bv + (Z0 + Z1)(X0 + X1) = (Z0 + Z1) (a, b + c, 0) + + (a + bv + cv^2)v = c xi + av + bv^2 +*/ +inline void mul_041(Fp12& z, const Fp6& x) +{ + const Fp2& a = x.a; + const Fp2& b = x.b; + const Fp2& c = x.c; + Fp6& z0 = z.a; + Fp6& z1 = z.b; + Fp6 z0x0, z1x1, t0; + Fp2 t1; + Fp2::mul(z1x1.a, z1.c, b); + Fp2::mul_xi(z1x1.a, z1x1.a); + Fp2::mul(z1x1.b, z1.a, b); + Fp2::mul(z1x1.c, z1.b, b); + Fp2::add(t1, x.b, c); + Fp6::add(t0, z0, z1); + Fp6mul_01(z0x0, z0, a, c); + Fp6mul_01(t0, t0, a, t1); + Fp6::sub(z.b, t0, z0x0); + z.b -= z1x1; + // a + bv + cv^2 = cxi + av + bv^2 + Fp2::mul_xi(z1x1.c, z1x1.c); + Fp2::add(z.a.a, z0x0.a, z1x1.c); + Fp2::add(z.a.b, z0x0.b, z1x1.a); + Fp2::add(z.a.c, z0x0.c, z1x1.b); +} +inline void mulSparse(Fp12& z, const Fp6& x) +{ + if (BN::param.cp.isMtype) { + mul_041(z, x); + } else { + mul_403(z, x); + } +} +inline void convertFp6toFp12(Fp12& y, const Fp6& x) +{ + y.clear(); + if (BN::param.cp.isMtype) { + // (a, b, c) -> (a, c, 0, 0, b, 0) + y.a.a = x.a; + y.b.b = x.b; + y.a.b = x.c; + } else { + // (a, b, c) -> (b, 0, 0, c, a, 0) + y.b.b = x.a; + y.a.a = x.b; + y.b.a = x.c; + } +} +inline void mulSparse2(Fp12& z, const Fp6& x, const Fp6& y) +{ + convertFp6toFp12(z, x); + mulSparse(z, y); +} +inline void mapToCyclotomic(Fp12& y, const Fp12& x) +{ + Fp12 z; + Fp12::Frobenius2(z, x); // z = x^(p^2) + z *= x; // x^(p^2 + 1) + Fp12::inv(y, z); + Fp6::neg(z.b, z.b); // z^(p^6) = conjugate of z + y *= z; +} +/* + Implementing Pairings at the 192-bit Security Level + D.F.Aranha, L.F.Castaneda, E.Knapp, A.Menezes, F.R.Henriquez + Section 4 +*/ +inline void expHardPartBLS12(Fp12& y, const Fp12& x) +{ +#if 0 + const mpz_class& p = param.p; + mpz_class p2 = p * p; + mpz_class p4 = p2 * p2; + Fp12::pow(y, x, (p4 - p2 + 1) / param.r * 3); + return; +#endif +#if 1 + Fp12 a0, a1, a2, a3, a4, a5, a6, a7; + Fp12::unitaryInv(a0, x); // a0 = x^-1 + fasterSqr(a1, a0); // x^-2 + pow_z(a2, x); // x^z + fasterSqr(a3, a2); // x^2z + a1 *= a2; // a1 = x^(z-2) + pow_z(a7, a1); // a7 = x^(z^2-2z) + pow_z(a4, a7); // a4 = x^(z^3-2z^2) + pow_z(a5, a4); // a5 = x^(z^4-2z^3) + a3 *= a5; // a3 = x^(z^4-2z^3+2z) + pow_z(a6, a3); // a6 = x^(z^5-2z^4+2z^2) + + Fp12::unitaryInv(a1, a1); // x^(2-z) + a1 *= a6; // x^(z^5-2z^4+2z^2-z+2) + a1 *= x; // x^(z^5-2z^4+2z^2-z+3) = x^c0 + a3 *= a0; // x^(z^4-2z^3-1) = x^c1 + Fp12::Frobenius(a3, a3); // x^(c1 p) + a1 *= a3; // x^(c0 + c1 p) + a4 *= a2; // x^(z^3-2z^2+z) = x^c2 + Fp12::Frobenius2(a4, a4); // x^(c2 p^2) + a1 *= a4; // x^(c0 + c1 p + c2 p^2) + a7 *= x; // x^(z^2-2z+1) = x^c3 + Fp12::Frobenius3(y, a7); + y *= a1; +#else + Fp12 t1, t2, t3; + Fp12::Frobenius(t1, x); + Fp12::Frobenius(t2, t1); + Fp12::Frobenius(t3, t2); + Fp12::pow(t1, t1, param.exp_c1); + Fp12::pow(t2, t2, param.exp_c2); + Fp12::pow(t3, t3, param.exp_c3); + Fp12::pow(y, x, param.exp_c0); + y *= t1; + y *= t2; + y *= t3; +#endif +} +/* + Faster Hashing to G2 + Laura Fuentes-Castaneda, Edward Knapp, Francisco Rodriguez-Henriquez + section 4.1 + y = x^(d 2z(6z^2 + 3z + 1)) where + p = p(z) = 36z^4 + 36z^3 + 24z^2 + 6z + 1 + r = r(z) = 36z^4 + 36z^3 + 18z^2 + 6z + 1 + d = (p^4 - p^2 + 1) / r + d1 = d 2z(6z^2 + 3z + 1) + = c0 + c1 p + 
c2 p^2 + c3 p^3 + + c0 = 1 + 6z + 12z^2 + 12z^3 + c1 = 4z + 6z^2 + 12z^3 + c2 = 6z + 6z^2 + 12z^3 + c3 = -1 + 4z + 6z^2 + 12z^3 + x -> x^z -> x^2z -> x^4z -> x^6z -> x^(6z^2) -> x^(12z^2) -> x^(12z^3) + a = x^(6z) x^(6z^2) x^(12z^3) + b = a / (x^2z) + x^d1 = (a x^(6z^2) x) b^p a^(p^2) (b / x)^(p^3) +*/ +inline void expHardPartBN(Fp12& y, const Fp12& x) +{ +#if 0 + const mpz_class& p = param.p; + mpz_class p2 = p * p; + mpz_class p4 = p2 * p2; + Fp12::pow(y, x, (p4 - p2 + 1) / param.r); + return; +#endif +#if 1 + Fp12 a, b; + Fp12 a2, a3; + pow_z(b, x); // x^z + fasterSqr(b, b); // x^2z + fasterSqr(a, b); // x^4z + a *= b; // x^6z + pow_z(a2, a); // x^(6z^2) + a *= a2; + fasterSqr(a3, a2); // x^(12z^2) + pow_z(a3, a3); // x^(12z^3) + a *= a3; + Fp12::unitaryInv(b, b); + b *= a; + a2 *= a; + Fp12::Frobenius2(a, a); + a *= a2; + a *= x; + Fp12::unitaryInv(y, x); + y *= b; + Fp12::Frobenius(b, b); + a *= b; + Fp12::Frobenius3(y, y); + y *= a; +#else + Fp12 t1, t2, t3; + Fp12::Frobenius(t1, x); + Fp12::Frobenius(t2, t1); + Fp12::Frobenius(t3, t2); + Fp12::pow(t1, t1, param.exp_c1); + Fp12::pow(t2, t2, param.exp_c2); + Fp12::pow(y, x, param.exp_c0); + y *= t1; + y *= t2; + y *= t3; +#endif +} +/* + remark : returned value is NOT on a curve +*/ +inline G1 makeAdjP(const G1& P) +{ + G1 adjP; + Fp::add(adjP.x, P.x, P.x); + adjP.x += P.x; + Fp::neg(adjP.y, P.y); + adjP.z = 1; + return adjP; +} + +} // mcl::bn::local + +/* + y = x^((p^12 - 1) / r) + (p^12 - 1) / r = (p^2 + 1) (p^6 - 1) (p^4 - p^2 + 1)/r + (a + bw)^(p^6) = a - bw in Fp12 + (p^4 - p^2 + 1)/r = c0 + c1 p + c2 p^2 + p^3 +*/ +inline void finalExp(Fp12& y, const Fp12& x) +{ +#if 1 + mapToCyclotomic(y, x); +#else + const mpz_class& p = param.p; + mpz_class p2 = p * p; + mpz_class p4 = p2 * p2; + Fp12::pow(y, x, p2 + 1); + Fp12::pow(y, y, p4 * p2 - 1); +#endif + if (BN::param.isBLS12) { + expHardPartBLS12(y, y); + } else { + expHardPartBN(y, y); + } +} +inline void millerLoop(Fp12& f, const G1& P_, const G2& Q_) +{ + G1 P(P_); + G2 Q(Q_); + P.normalize(); + Q.normalize(); + if (Q.isZero()) { + f = 1; + return; + } + assert(BN::param.siTbl[1] == 1); + G2 T = Q; + G2 negQ; + if (BN::param.useNAF) { + G2::neg(negQ, Q); + } + Fp6 d, e, l; + d = e = l = 1; + G1 adjP = makeAdjP(P); + dblLine(d, T, adjP); + addLine(l, T, Q, P); + mulSparse2(f, d, l); + for (size_t i = 2; i < BN::param.siTbl.size(); i++) { + dblLine(l, T, adjP); + Fp12::sqr(f, f); + mulSparse(f, l); + if (BN::param.siTbl[i]) { + if (BN::param.siTbl[i] > 0) { + addLine(l, T, Q, P); + } else { + addLine(l, T, negQ, P); + } + mulSparse(f, l); + } + } + if (BN::param.z < 0) { + G2::neg(T, T); + Fp6::neg(f.b, f.b); + } + if (BN::param.isBLS12) return; + G2 Q1, Q2; + Frobenius(Q1, Q); + Frobenius(Q2, Q1); + G2::neg(Q2, Q2); + addLine(d, T, Q1, P); + addLine(e, T, Q2, P); + Fp12 ft; + mulSparse2(ft, d, e); + f *= ft; +} +inline void pairing(Fp12& f, const G1& P, const G2& Q) +{ + millerLoop(f, P, Q); + finalExp(f, f); +} +/* + allocate param.precomputedQcoeffSize elements of Fp6 for Qcoeff +*/ +inline void precomputeG2(Fp6 *Qcoeff, const G2& Q_) +{ + size_t idx = 0; + G2 Q(Q_); + Q.normalize(); + if (Q.isZero()) { + for (size_t i = 0; i < BN::param.precomputedQcoeffSize; i++) { + Qcoeff[i] = 1; + } + return; + } + G2 T = Q; + G2 negQ; + if (BN::param.useNAF) { + G2::neg(negQ, Q); + } + assert(BN::param.siTbl[1] == 1); + dblLineWithoutP(Qcoeff[idx++], T); + addLineWithoutP(Qcoeff[idx++], T, Q); + for (size_t i = 2; i < BN::param.siTbl.size(); i++) { + dblLineWithoutP(Qcoeff[idx++], T); + if 
(BN::param.siTbl[i]) { + if (BN::param.siTbl[i] > 0) { + addLineWithoutP(Qcoeff[idx++], T, Q); + } else { + addLineWithoutP(Qcoeff[idx++], T, negQ); + } + } + } + if (BN::param.z < 0) { + G2::neg(T, T); + } + if (BN::param.isBLS12) return; + G2 Q1, Q2; + Frobenius(Q1, Q); + Frobenius(Q2, Q1); + G2::neg(Q2, Q2); + addLineWithoutP(Qcoeff[idx++], T, Q1); + addLineWithoutP(Qcoeff[idx++], T, Q2); + assert(idx == BN::param.precomputedQcoeffSize); +} +/* + millerLoop(e, P, Q) is same as the following + std::vector<Fp6> Qcoeff; + precomputeG2(Qcoeff, Q); + precomputedMillerLoop(e, P, Qcoeff); +*/ +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void precomputeG2(std::vector<Fp6>& Qcoeff, const G2& Q) +{ + Qcoeff.resize(BN::param.precomputedQcoeffSize); + precomputeG2(Qcoeff.data(), Q); +} +#endif +template<class Array> +void precomputeG2(bool *pb, Array& Qcoeff, const G2& Q) +{ + *pb = Qcoeff.resize(BN::param.precomputedQcoeffSize); + if (!*pb) return; + precomputeG2(Qcoeff.data(), Q); +} + +inline void precomputedMillerLoop(Fp12& f, const G1& P_, const Fp6* Qcoeff) +{ + G1 P(P_); + P.normalize(); + G1 adjP = makeAdjP(P); + size_t idx = 0; + Fp6 d, e, l; + mulFp6cb_by_G1xy(d, Qcoeff[idx], adjP); + idx++; + + mulFp6cb_by_G1xy(e, Qcoeff[idx], P); + idx++; + mulSparse2(f, d, e); + for (size_t i = 2; i < BN::param.siTbl.size(); i++) { + mulFp6cb_by_G1xy(l, Qcoeff[idx], adjP); + idx++; + Fp12::sqr(f, f); + mulSparse(f, l); + if (BN::param.siTbl[i]) { + mulFp6cb_by_G1xy(l, Qcoeff[idx], P); + idx++; + mulSparse(f, l); + } + } + if (BN::param.z < 0) { + Fp6::neg(f.b, f.b); + } + if (BN::param.isBLS12) return; + mulFp6cb_by_G1xy(d, Qcoeff[idx], P); + idx++; + mulFp6cb_by_G1xy(e, Qcoeff[idx], P); + idx++; + Fp12 ft; + mulSparse2(ft, d, e); + f *= ft; +} +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void precomputedMillerLoop(Fp12& f, const G1& P, const std::vector<Fp6>& Qcoeff) +{ + precomputedMillerLoop(f, P, Qcoeff.data()); +} +#endif +/* + f = MillerLoop(P1, Q1) x MillerLoop(P2, Q2) + Q2coeff : precomputed Q2 +*/ +inline void precomputedMillerLoop2mixed(Fp12& f, const G1& P1_, const G2& Q1_, const G1& P2_, const Fp6* Q2coeff) +{ + G1 P1(P1_), P2(P2_); + G2 Q1(Q1_); + P1.normalize(); + P2.normalize(); + Q1.normalize(); + if (Q1.isZero()) { + precomputedMillerLoop(f, P2_, Q2coeff); + return; + } + G2 T = Q1; + G2 negQ1; + if (BN::param.useNAF) { + G2::neg(negQ1, Q1); + } + G1 adjP1 = makeAdjP(P1); + G1 adjP2 = makeAdjP(P2); + size_t idx = 0; + Fp6 d1, d2, e1, e2, l1, l2; + dblLine(d1, T, adjP1); + mulFp6cb_by_G1xy(d2, Q2coeff[idx], adjP2); + idx++; + + Fp12 f1, f2; + e1 = 1; + addLine(e1, T, Q1, P1); + mulSparse2(f1, d1, e1); + + mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); + mulSparse2(f2, d2, e2); + Fp12::mul(f, f1, f2); + idx++; + for (size_t i = 2; i < BN::param.siTbl.size(); i++) { + dblLine(l1, T, adjP1); + mulFp6cb_by_G1xy(l2, Q2coeff[idx], adjP2); + idx++; + Fp12::sqr(f, f); + mulSparse2(f1, l1, l2); + f *= f1; + if (BN::param.siTbl[i]) { + if (BN::param.siTbl[i] > 0) { + addLine(l1, T, Q1, P1); + } else { + addLine(l1, T, negQ1, P1); + } + mulFp6cb_by_G1xy(l2, Q2coeff[idx], P2); + idx++; + mulSparse2(f1, l1, l2); + f *= f1; + } + } + if (BN::param.z < 0) { + G2::neg(T, T); + Fp6::neg(f.b, f.b); + } + if (BN::param.isBLS12) return; + G2 Q11, Q12; + Frobenius(Q11, Q1); + Frobenius(Q12, Q11); + G2::neg(Q12, Q12); + addLine(d1, T, Q11, P1); + mulFp6cb_by_G1xy(d2, Q2coeff[idx], P2); + idx++; + addLine(e1, T, Q12, P1); + mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); + idx++; + mulSparse2(f1, d1, e1); + mulSparse2(f2, d2, 
e2); + f *= f1; + f *= f2; +} +/* + f = MillerLoop(P1, Q1) x MillerLoop(P2, Q2) + Q1coeff, Q2coeff : precomputed Q1, Q2 +*/ +inline void precomputedMillerLoop2(Fp12& f, const G1& P1_, const Fp6* Q1coeff, const G1& P2_, const Fp6* Q2coeff) +{ + G1 P1(P1_), P2(P2_); + P1.normalize(); + P2.normalize(); + G1 adjP1 = makeAdjP(P1); + G1 adjP2 = makeAdjP(P2); + size_t idx = 0; + Fp6 d1, d2, e1, e2, l1, l2; + mulFp6cb_by_G1xy(d1, Q1coeff[idx], adjP1); + mulFp6cb_by_G1xy(d2, Q2coeff[idx], adjP2); + idx++; + + Fp12 f1, f2; + mulFp6cb_by_G1xy(e1, Q1coeff[idx], P1); + mulSparse2(f1, d1, e1); + + mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); + mulSparse2(f2, d2, e2); + Fp12::mul(f, f1, f2); + idx++; + for (size_t i = 2; i < BN::param.siTbl.size(); i++) { + mulFp6cb_by_G1xy(l1, Q1coeff[idx], adjP1); + mulFp6cb_by_G1xy(l2, Q2coeff[idx], adjP2); + idx++; + Fp12::sqr(f, f); + mulSparse2(f1, l1, l2); + f *= f1; + if (BN::param.siTbl[i]) { + mulFp6cb_by_G1xy(l1, Q1coeff[idx], P1); + mulFp6cb_by_G1xy(l2, Q2coeff[idx], P2); + idx++; + mulSparse2(f1, l1, l2); + f *= f1; + } + } + if (BN::param.z < 0) { + Fp6::neg(f.b, f.b); + } + if (BN::param.isBLS12) return; + mulFp6cb_by_G1xy(d1, Q1coeff[idx], P1); + mulFp6cb_by_G1xy(d2, Q2coeff[idx], P2); + idx++; + mulFp6cb_by_G1xy(e1, Q1coeff[idx], P1); + mulFp6cb_by_G1xy(e2, Q2coeff[idx], P2); + idx++; + mulSparse2(f1, d1, e1); + mulSparse2(f2, d2, e2); + f *= f1; + f *= f2; +} +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void precomputedMillerLoop2(Fp12& f, const G1& P1, const std::vector<Fp6>& Q1coeff, const G1& P2, const std::vector<Fp6>& Q2coeff) +{ + precomputedMillerLoop2(f, P1, Q1coeff.data(), P2, Q2coeff.data()); +} +inline void precomputedMillerLoop2mixed(Fp12& f, const G1& P1, const G2& Q1, const G1& P2, const std::vector<Fp6>& Q2coeff) +{ + precomputedMillerLoop2mixed(f, P1, Q1, P2, Q2coeff.data()); +} +#endif +inline void mapToG1(bool *pb, G1& P, const Fp& x) { *pb = BN::param.mapTo.calcG1(P, x); } +inline void mapToG2(bool *pb, G2& P, const Fp2& x) { *pb = BN::param.mapTo.calcG2(P, x); } +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void mapToG1(G1& P, const Fp& x) +{ + bool b; + mapToG1(&b, P, x); + if (!b) throw cybozu::Exception("mapToG1:bad value") << x; +} +inline void mapToG2(G2& P, const Fp2& x) +{ + bool b; + mapToG2(&b, P, x); + if (!b) throw cybozu::Exception("mapToG2:bad value") << x; +} +#endif +inline void hashAndMapToG1(G1& P, const void *buf, size_t bufSize) +{ + Fp t; + t.setHashOf(buf, bufSize); + bool b; + mapToG1(&b, P, t); + // It will not happen that the hashed value is equal to special value + assert(b); + (void)b; +} +inline void hashAndMapToG2(G2& P, const void *buf, size_t bufSize) +{ + Fp2 t; + t.a.setHashOf(buf, bufSize); + t.b.clear(); + bool b; + mapToG2(&b, P, t); + // It will not happen that the hashed value is equal to special value + assert(b); + (void)b; +} +#ifndef CYBOZU_DONT_USE_STRING +inline void hashAndMapToG1(G1& P, const std::string& str) +{ + hashAndMapToG1(P, str.c_str(), str.size()); +} +inline void hashAndMapToG2(G2& P, const std::string& str) +{ + hashAndMapToG2(P, str.c_str(), str.size()); +} +#endif +inline void verifyOrderG1(bool doVerify) +{ + if (BN::param.isBLS12) { + G1::setOrder(doVerify ? BN::param.r : 0); + } +} +inline void verifyOrderG2(bool doVerify) +{ + G2::setOrder(doVerify ? 
BN::param.r : 0); +} + +// backward compatibility +using mcl::CurveParam; +static const CurveParam& CurveFp254BNb = BN254; +static const CurveParam& CurveFp382_1 = BN381_1; +static const CurveParam& CurveFp382_2 = BN381_2; +static const CurveParam& CurveFp462 = BN462; +static const CurveParam& CurveSNARK1 = BN_SNARK1; + +/* + FrobeniusOnTwist for Dtype + p mod 6 = 1, w^6 = xi + Frob(x', y') = phi Frob phi^-1(x', y') + = phi Frob (x' w^2, y' w^3) + = phi (x'^p w^2p, y'^p w^3p) + = (F(x') w^2(p - 1), F(y') w^3(p - 1)) + = (F(x') g^2, F(y') g^3) + + FrobeniusOnTwist for Dtype + use (1/g) instead of g +*/ +inline void Frobenius(G2& D, const G2& S) +{ + Fp2::Frobenius(D.x, S.x); + Fp2::Frobenius(D.y, S.y); + Fp2::Frobenius(D.z, S.z); + D.x *= BN::param.g2; + D.y *= BN::param.g3; +} +inline void Frobenius2(G2& D, const G2& S) +{ + Frobenius(D, S); + Frobenius(D, D); +} +inline void Frobenius3(G2& D, const G2& S) +{ + Frobenius(D, S); + Frobenius(D, D); + Frobenius(D, D); +} + +namespace BN { + +using namespace mcl::bn; // backward compatibility + +inline void init(bool *pb, const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) +{ + local::StaticVar<>::param.init(pb, cp, mode); + if (!*pb) return; + G1::setMulArrayGLV(local::mulArrayGLV1); + G2::setMulArrayGLV(local::mulArrayGLV2); + Fp12::setPowArrayGLV(local::powArrayGLV2); + G1::setCompressedExpression(); + G2::setCompressedExpression(); + *pb = true; +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void init(const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) +{ + bool b; + init(&b, cp, mode); + if (!b) throw cybozu::Exception("BN:init"); +} +#endif + +} // mcl::bn::BN + +inline void initPairing(bool *pb, const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) +{ + BN::init(pb, cp, mode); +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void initPairing(const mcl::CurveParam& cp = mcl::BN254, fp::Mode mode = fp::FP_AUTO) +{ + bool b; + BN::init(&b, cp, mode); + if (!b) throw cybozu::Exception("bn:initPairing"); +} +#endif + +} } // mcl::bn + diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn256.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn256.hpp new file mode 100644 index 000000000..7a5da7a05 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn256.hpp @@ -0,0 +1,15 @@ +#pragma once +/** + @file + @brief preset class for 256-bit optimal ate pairing over BN curves + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#define MCL_MAX_FP_BIT_SIZE 256 +#include <mcl/bn.hpp> + +namespace mcl { namespace bn256 { +using namespace mcl::bn; +} } + diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn384.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn384.hpp new file mode 100644 index 000000000..8aa14fe5c --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn384.hpp @@ -0,0 +1,15 @@ +#pragma once +/** + @file + @brief preset class for 384-bit optimal ate pairing over BN curves + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#define MCL_MAX_FP_BIT_SIZE 384 +#include <mcl/bn.hpp> +// #define MCL_MAX_FR_BIT_SIZE 256 // can set if BLS12_381 + +namespace mcl { namespace bn384 { +using namespace mcl::bn; +} } diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/bn512.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn512.hpp new file mode 100644 index 
000000000..c87ad9035 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/bn512.hpp @@ -0,0 +1,14 @@ +#pragma once +/** + @file + @brief preset class for 512-bit optimal ate pairing over BN curves + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#define MCL_MAX_FP_BIT_SIZE 512 +#include <mcl/bn.hpp> + +namespace mcl { namespace bn512 { +using namespace mcl::bn; +} } diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/conversion.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/conversion.hpp new file mode 100644 index 000000000..7a04b7fa2 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/conversion.hpp @@ -0,0 +1,495 @@ +#pragma once +#include <cybozu/itoa.hpp> +#include <cybozu/stream.hpp> +/** + @file + @brief convertion bin/dec/hex <=> array + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4127) +#endif + +namespace mcl { namespace fp { + +namespace local { + +inline bool isSpace(char c) +{ + return c == ' ' || c == '\t' || c == '\r' || c == '\n'; +} +template<class InputStream> +bool skipSpace(char *c, InputStream& is) +{ + for (;;) { + if (!cybozu::readChar(c, is)) return false; + if (!isSpace(*c)) return true; + } +} + +#ifndef CYBOZU_DONT_USE_STRING +template<class InputStream> +void loadWord(std::string& s, InputStream& is) +{ + s.clear(); + char c; + if (!skipSpace(&c, is)) return; + s = c; + for (;;) { + if (!cybozu::readChar(&c, is)) return; + if (isSpace(c)) break; + s += c; + } +} +#endif + +template<class InputStream> +size_t loadWord(char *buf, size_t bufSize, InputStream& is) +{ + if (bufSize == 0) return 0; + char c; + if (!skipSpace(&c, is)) return 0; + size_t pos = 0; + buf[pos++] = c; + for (;;) { + if (!cybozu::readChar(&c, is)) break; + if (isSpace(c)) break; + if (pos == bufSize) return 0; + buf[pos++] = c; + } + return pos; +} + + +/* + q = x[] / x + @retval r = x[] % x + @note accept q == x +*/ +inline uint32_t divU32(uint32_t *q, const uint32_t *x, size_t xn, uint32_t y) +{ + if (xn == 0) return 0; + uint32_t r = 0; + for (int i = (int)xn - 1; i >= 0; i--) { + uint64_t t = (uint64_t(r) << 32) | x[i]; + q[i] = uint32_t(t / y); + r = uint32_t(t % y); + } + return r; +} + +/* + z[0, xn) = x[0, xn) * y + return z[xn] + @note accept z == x +*/ +inline uint32_t mulU32(uint32_t *z, const uint32_t *x, size_t xn, uint32_t y) +{ + uint32_t H = 0; + for (size_t i = 0; i < xn; i++) { + uint32_t t = H; + uint64_t v = uint64_t(x[i]) * y; + uint32_t L = uint32_t(v); + H = uint32_t(v >> 32); + z[i] = t + L; + if (z[i] < t) { + H++; + } + } + return H; +} + +/* + x[0, xn) += y + return 1 if overflow else 0 +*/ +inline uint32_t addU32(uint32_t *x, size_t xn, uint32_t y) +{ + uint32_t t = x[0] + y; + x[0] = t; + if (t >= y) return 0; + for (size_t i = 1; i < xn; i++) { + t = x[i] + 1; + x[i] = t; + if (t != 0) return 0; + } + return 1; +} + +inline uint32_t decToU32(const char *p, size_t size, bool *pb) +{ + assert(0 < size && size <= 9); + uint32_t x = 0; + for (size_t i = 0; i < size; i++) { + char c = p[i]; + if (c < '0' || c > '9') { + *pb = false; + return 0; + } + x = x * 10 + uint32_t(c - '0'); + } + *pb = true; + return x; +} + +inline bool hexCharToUint8(uint8_t *v, char _c) +{ + uint32_t c = uint8_t(_c); // cast is necessary + if (c - '0' <= '9' - '0') { + c = c - '0'; + } else if (c - 'a' <= 'f' - 'a') 
{ + c = (c - 'a') + 10; + } else if (c - 'A' <= 'F' - 'A') { + c = (c - 'A') + 10; + } else { + return false; + } + *v = uint8_t(c); + return true; +} + +template<class UT> +bool hexToUint(UT *px, const char *p, size_t size) +{ + assert(0 < size && size <= sizeof(UT) * 2); + UT x = 0; + for (size_t i = 0; i < size; i++) { + uint8_t v; + if (!hexCharToUint8(&v, p[i])) return false; + x = x * 16 + v; + } + *px = x; + return true; +} + +template<class UT> +bool binToUint(UT *px, const char *p, size_t size) +{ + assert(0 < size && size <= sizeof(UT) * 8); + UT x = 0; + for (size_t i = 0; i < size; i++) { + UT c = static_cast<uint8_t>(p[i]); + if (c == '0') { + x = x * 2; + } else if (c == '1') { + x = x * 2 + 1; + } else { + return false; + } + } + *px = x; + return true; +} + +inline bool parsePrefix(size_t *readSize, bool *isMinus, int *base, const char *buf, size_t bufSize) +{ + if (bufSize == 0) return false; + size_t pos = 0; + if (*buf == '-') { + if (bufSize == 1) return false; + *isMinus = true; + buf++; + pos++; + } else { + *isMinus = false; + } + if (buf[0] == '0') { + if (bufSize > 1 && buf[1] == 'x') { + if (*base == 0 || *base == 16) { + *base = 16; + pos += 2; + } else { + return false; + } + } else if (bufSize > 1 && buf[1] == 'b') { + if (*base == 0 || *base == 2) { + *base = 2; + pos += 2; + } else { + return false; + } + } + } + if (*base == 0) *base = 10; + if (pos == bufSize) return false; + *readSize = pos; + return true; +} + +} // mcl::fp::local + +/* + convert little endian x[0, xn) to buf + return written size if success else 0 + data is buf[bufSize - retval, bufSize) + start "0x" if withPrefix +*/ +template<class T> +size_t arrayToHex(char *buf, size_t bufSize, const T *x, size_t n, bool withPrefix = false) +{ + size_t fullN = 0; + if (n > 1) { + size_t pos = n - 1; + while (pos > 0) { + if (x[pos]) break; + pos--; + } + if (pos > 0) fullN = pos; + } + const T v = n == 0 ? 0 : x[fullN]; + const size_t topLen = cybozu::getHexLength(v); + const size_t startPos = withPrefix ? 2 : 0; + const size_t lenT = sizeof(T) * 2; + const size_t totalSize = startPos + fullN * lenT + topLen; + if (totalSize > bufSize) return 0; + char *const top = buf + bufSize - totalSize; + if (withPrefix) { + top[0] = '0'; + top[1] = 'x'; + } + cybozu::itohex(&top[startPos], topLen, v, false); + for (size_t i = 0; i < fullN; i++) { + cybozu::itohex(&top[startPos + topLen + i * lenT], lenT, x[fullN - 1 - i], false); + } + return totalSize; +} + +/* + convert little endian x[0, xn) to buf + return written size if success else 0 + data is buf[bufSize - retval, bufSize) + start "0b" if withPrefix +*/ +template<class T> +size_t arrayToBin(char *buf, size_t bufSize, const T *x, size_t n, bool withPrefix) +{ + size_t fullN = 0; + if (n > 1) { + size_t pos = n - 1; + while (pos > 0) { + if (x[pos]) break; + pos--; + } + if (pos > 0) fullN = pos; + } + const T v = n == 0 ? 0 : x[fullN]; + const size_t topLen = cybozu::getBinLength(v); + const size_t startPos = withPrefix ? 
2 : 0; + const size_t lenT = sizeof(T) * 8; + const size_t totalSize = startPos + fullN * lenT + topLen; + if (totalSize > bufSize) return 0; + char *const top = buf + bufSize - totalSize; + if (withPrefix) { + top[0] = '0'; + top[1] = 'b'; + } + cybozu::itobin(&top[startPos], topLen, v); + for (size_t i = 0; i < fullN; i++) { + cybozu::itobin(&top[startPos + topLen + i * lenT], lenT, x[fullN - 1 - i]); + } + return totalSize; +} + +/* + convert hex string to x[0..xn) + hex string = [0-9a-fA-F]+ +*/ +template<class UT> +inline size_t hexToArray(UT *x, size_t maxN, const char *buf, size_t bufSize) +{ + if (bufSize == 0) return 0; + const size_t unitLen = sizeof(UT) * 2; + const size_t q = bufSize / unitLen; + const size_t r = bufSize % unitLen; + const size_t requireSize = q + (r ? 1 : 0); + if (maxN < requireSize) return 0; + for (size_t i = 0; i < q; i++) { + if (!local::hexToUint(&x[i], &buf[r + (q - 1 - i) * unitLen], unitLen)) return 0; + } + if (r) { + if (!local::hexToUint(&x[q], buf, r)) return 0; + } + return requireSize; +} +/* + convert bin string to x[0..xn) + bin string = [01]+ +*/ +template<class UT> +inline size_t binToArray(UT *x, size_t maxN, const char *buf, size_t bufSize) +{ + if (bufSize == 0) return 0; + const size_t unitLen = sizeof(UT) * 8; + const size_t q = bufSize / unitLen; + const size_t r = bufSize % unitLen; + const size_t requireSize = q + (r ? 1 : 0); + if (maxN < requireSize) return 0; + for (size_t i = 0; i < q; i++) { + if (!local::binToUint(&x[i], &buf[r + (q - 1 - i) * unitLen], unitLen)) return 0; + } + if (r) { + if (!local::binToUint(&x[q], buf, r)) return 0; + } + return requireSize; +} + +/* + little endian x[0, xn) to buf + return written size if success else 0 + data is buf[bufSize - retval, bufSize) +*/ +template<class UT> +inline size_t arrayToDec(char *buf, size_t bufSize, const UT *x, size_t xn) +{ + const size_t maxN = 64; + uint32_t t[maxN]; + if (sizeof(UT) == 8) { + xn *= 2; + } + if (xn > maxN) return 0; + memcpy(t, x, xn * sizeof(t[0])); + + const size_t width = 9; + const uint32_t i1e9 = 1000000000U; + size_t pos = 0; + for (;;) { + uint32_t r = local::divU32(t, t, xn, i1e9); + while (xn > 0 && t[xn - 1] == 0) xn--; + size_t len = cybozu::itoa_local::uintToDec(buf, bufSize - pos, r); + if (len == 0) return 0; + assert(0 < len && len <= width); + if (xn == 0) return pos + len; + // fill (width - len) '0' + for (size_t j = 0; j < width - len; j++) { + buf[bufSize - pos - width + j] = '0'; + } + pos += width; + } +} + +/* + convert buf[0, bufSize) to x[0, num) + return written num if success else 0 +*/ +template<class UT> +inline size_t decToArray(UT *_x, size_t maxN, const char *buf, size_t bufSize) +{ + assert(sizeof(UT) == 4 || sizeof(UT) == 8); + const size_t width = 9; + const uint32_t i1e9 = 1000000000U; + if (maxN == 0) return 0; + if (sizeof(UT) == 8) { + maxN *= 2; + } + uint32_t *x = reinterpret_cast<uint32_t*>(_x); + size_t xn = 1; + x[0] = 0; + while (bufSize > 0) { + size_t n = bufSize % width; + if (n == 0) n = width; + bool b; + uint32_t v = local::decToU32(buf, n, &b); + if (!b) return 0; + uint32_t H = local::mulU32(x, x, xn, i1e9); + if (H > 0) { + if (xn == maxN) return 0; + x[xn++] = H; + } + H = local::addU32(x, xn, v); + if (H > 0) { + if (xn == maxN) return 0; + x[xn++] = H; + } + buf += n; + bufSize -= n; + } + if (sizeof(UT) == 8 && (xn & 1)) { + x[xn++] = 0; + } + return xn / (sizeof(UT) / 4); +} + +/* + return retavl is written size if success else 0 + REMARK : the top of string is buf + bufSize - retval +*/ 
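+// e.g. arrayToStr(buf, bufSize, x, n, 16, true) right-aligns a "0x..."-prefixed hex string in buf; it occupies buf[bufSize - retval, bufSize) and is not NUL-terminated.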
+template<class UT> +size_t arrayToStr(char *buf, size_t bufSize, const UT *x, size_t n, int base, bool withPrefix) +{ + switch (base) { + case 0: + case 10: + return arrayToDec(buf, bufSize, x, n); + case 16: + return arrayToHex(buf, bufSize, x, n, withPrefix); + case 2: + return arrayToBin(buf, bufSize, x, n, withPrefix); + default: + return 0; + } +} + +template<class UT> +size_t strToArray(bool *pIsMinus, UT *x, size_t xN, const char *buf, size_t bufSize, int ioMode) +{ + ioMode &= 31; + size_t readSize; + if (!local::parsePrefix(&readSize, pIsMinus, &ioMode, buf, bufSize)) return 0; + switch (ioMode) { + case 10: + return decToArray(x, xN, buf + readSize, bufSize - readSize); + case 16: + return hexToArray(x, xN, buf + readSize, bufSize - readSize); + case 2: + return binToArray(x, xN, buf + readSize, bufSize - readSize); + default: + return 0; + } +} + +/* + convert src[0, n) to (n * 2) byte hex string and write it to os + return true if success else flase +*/ +template<class OutputStream> +void writeHexStr(bool *pb, OutputStream& os, const void *src, size_t n) +{ + const uint8_t *p = (const uint8_t *)src; + for (size_t i = 0; i < n; i++) { + char hex[2]; + cybozu::itohex(hex, sizeof(hex), p[i], false); + cybozu::write(pb, os, hex, sizeof(hex)); + if (!*pb) return; + } + *pb = true; +} +/* + read hex string from is and convert it to byte array + return written buffer size +*/ +template<class InputStream> +inline size_t readHexStr(void *buf, size_t n, InputStream& is) +{ + bool b; + uint8_t *dst = (uint8_t *)buf; + for (size_t i = 0; i < n; i++) { + uint8_t L, H; + char c[2]; + if (cybozu::readSome(c, sizeof(c), is) != sizeof(c)) return i; + b = local::hexCharToUint8(&H, c[0]); + if (!b) return i; + b = local::hexCharToUint8(&L, c[1]); + if (!b) return i; + dst[i] = (H << 4) | L; + } + return n; +} + +} } // mcl::fp + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/curve_type.h b/vendor/github.com/dexon-foundation/mcl/include/mcl/curve_type.h new file mode 100644 index 000000000..5957d1ae8 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/curve_type.h @@ -0,0 +1,18 @@ +#pragma once +/** + @file + @brief curve type + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ + +enum { + MCL_BN254 = 0, + MCL_BN381_1 = 1, + MCL_BN381_2 = 2, + MCL_BN462 = 3, + MCL_BN_SNARK1 = 4, + MCL_BLS12_381 = 5, + MCL_BN160 = 6 +}; diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/ec.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/ec.hpp new file mode 100644 index 000000000..8ebf7e757 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/ec.hpp @@ -0,0 +1,1019 @@ +#pragma once +/** + @file + @brief elliptic curve + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <stdlib.h> +#include <cybozu/exception.hpp> +#include <mcl/op.hpp> +#include <mcl/util.hpp> + +//#define MCL_EC_USE_AFFINE + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4458) +#endif + +namespace mcl { + +namespace ec { + +enum Mode { + Jacobi, + Proj +}; + +} // mcl::ecl + +/* + elliptic curve + y^2 = x^3 + ax + b (affine) + y^2 = x^3 + az^4 + bz^6 (Jacobi) x = X/Z^2, y = Y/Z^3 +*/ +template<class _Fp> +class EcT : public fp::Serializable<EcT<_Fp> > { + enum { + zero, + minus3, + generic + }; +public: + typedef _Fp Fp; + typedef _Fp BaseFp; +#ifdef 
MCL_EC_USE_AFFINE + Fp x, y; + bool inf_; +#else + Fp x, y, z; + static int mode_; +#endif + static Fp a_; + static Fp b_; + static int specialA_; + static int ioMode_; + /* + order_ is the order of G2 which is the subgroup of EcT<Fp2>. + check the order of the elements if verifyOrder_ is true + */ + static bool verifyOrder_; + static mpz_class order_; + static void (*mulArrayGLV)(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime); + /* default constructor is undefined value */ + EcT() {} + EcT(const Fp& _x, const Fp& _y) + { + set(_x, _y); + } + bool isNormalized() const + { +#ifdef MCL_EC_USE_AFFINE + return true; +#else + return isZero() || z.isOne(); +#endif + } +#ifndef MCL_EC_USE_AFFINE +private: + void normalizeJacobi() + { + assert(!z.isZero()); + Fp rz2; + Fp::inv(z, z); + Fp::sqr(rz2, z); + x *= rz2; + y *= rz2; + y *= z; + z = 1; + } + void normalizeProj() + { + assert(!z.isZero()); + Fp::inv(z, z); + x *= z; + y *= z; + z = 1; + } + // Y^2 == X(X^2 + aZ^4) + bZ^6 + bool isValidJacobi() const + { + Fp y2, x2, z2, z4, t; + Fp::sqr(x2, x); + Fp::sqr(y2, y); + Fp::sqr(z2, z); + Fp::sqr(z4, z2); + Fp::mul(t, z4, a_); + t += x2; + t *= x; + z4 *= z2; + z4 *= b_; + t += z4; + return y2 == t; + } + // (Y^2 - bZ^2)Z = X(X^2 + aZ^2) + bool isValidProj() const + { + Fp y2, x2, z2, t; + Fp::sqr(x2, x); + Fp::sqr(y2, y); + Fp::sqr(z2, z); + Fp::mul(t, a_, z2); + t += x2; + t *= x; + z2 *= b_; + y2 -= z2; + y2 *= z; + return y2 == t; + } +#endif + // y^2 == (x^2 + a)x + b + static inline bool isValid(const Fp& _x, const Fp& _y) + { + Fp y2, t; + Fp::sqr(y2, _y); + Fp::sqr(t, _x); + t += a_; + t *= _x; + t += b_; + return y2 == t; + } +public: + void normalize() + { +#ifndef MCL_EC_USE_AFFINE + if (isNormalized()) return; + switch (mode_) { + case ec::Jacobi: + normalizeJacobi(); + break; + case ec::Proj: + normalizeProj(); + break; + } +#endif + } + static void normalize(EcT& y, const EcT& x) + { + y = x; + y.normalize(); + } + static inline void init(const Fp& a, const Fp& b, int mode = ec::Jacobi) + { + a_ = a; + b_ = b; + if (a_.isZero()) { + specialA_ = zero; + } else if (a_ == -3) { + specialA_ = minus3; + } else { + specialA_ = generic; + } + ioMode_ = 0; + verifyOrder_ = false; + order_ = 0; + mulArrayGLV = 0; +#ifdef MCL_EC_USE_AFFINE + cybozu::disable_warning_unused_variable(mode); +#else + assert(mode == ec::Jacobi || mode == ec::Proj); + mode_ = mode; +#endif + } + /* + verify the order of *this is equal to order if order != 0 + in constructor, set, setStr, operator<<(). 
+ */ + static void setOrder(const mpz_class& order) + { + if (order != 0) { + verifyOrder_ = true; + order_ = order; + } else { + verifyOrder_ = false; + // don't clear order_ because it is used for isValidOrder() + } + } + static void setMulArrayGLV(void f(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime)) + { + mulArrayGLV = f; + } + static inline void init(bool *pb, const char *astr, const char *bstr, int mode = ec::Jacobi) + { + Fp a, b; + a.setStr(pb, astr); + if (!*pb) return; + b.setStr(pb, bstr); + if (!*pb) return; + init(a, b, mode); + } + // verify the order + bool isValidOrder() const + { + EcT Q; + EcT::mulGeneric(Q, *this, order_); + return Q.isZero(); + } + bool isValid() const + { + if (isZero()) return true; + bool isOK = false; +#ifndef MCL_EC_USE_AFFINE + if (!z.isOne()) { + switch (mode_) { + case ec::Jacobi: + isOK = isValidJacobi(); + break; + case ec::Proj: + isOK = isValidProj(); + break; + } + } else +#endif + { + isOK = isValid(x, y); + } + if (!isOK) return false; + if (verifyOrder_) return isValidOrder(); + return true; + } + void set(bool *pb, const Fp& _x, const Fp& _y, bool verify = true) + { + if (verify && !isValid(_x, _y)) { + *pb = false; + return; + } + x = _x; y = _y; +#ifdef MCL_EC_USE_AFFINE + inf_ = false; +#else + z = 1; +#endif + if (verify && verifyOrder_ && !isValidOrder()) { + *pb = false; + } else { + *pb = true; + } + } + void clear() + { +#ifdef MCL_EC_USE_AFFINE + inf_ = true; +#else + z.clear(); +#endif + x.clear(); + y.clear(); + } +#ifndef MCL_EC_USE_AFFINE + static inline void dblNoVerifyInfJacobi(EcT& R, const EcT& P) + { + Fp S, M, t, y2; + Fp::sqr(y2, P.y); + Fp::mul(S, P.x, y2); + const bool isPzOne = P.z.isOne(); + S += S; + S += S; + Fp::sqr(M, P.x); + switch (specialA_) { + case zero: + Fp::add(t, M, M); + M += t; + break; + case minus3: + if (isPzOne) { + M -= P.z; + } else { + Fp::sqr(t, P.z); + Fp::sqr(t, t); + M -= t; + } + Fp::add(t, M, M); + M += t; + break; + case generic: + default: + if (isPzOne) { + t = a_; + } else { + Fp::sqr(t, P.z); + Fp::sqr(t, t); + t *= a_; + } + t += M; + M += M; + M += t; + break; + } + Fp::sqr(R.x, M); + R.x -= S; + R.x -= S; + if (isPzOne) { + R.z = P.y; + } else { + Fp::mul(R.z, P.y, P.z); + } + R.z += R.z; + Fp::sqr(y2, y2); + y2 += y2; + y2 += y2; + y2 += y2; + Fp::sub(R.y, S, R.x); + R.y *= M; + R.y -= y2; + } + static inline void dblNoVerifyInfProj(EcT& R, const EcT& P) + { + const bool isPzOne = P.z.isOne(); + Fp w, t, h; + switch (specialA_) { + case zero: + Fp::sqr(w, P.x); + Fp::add(t, w, w); + w += t; + break; + case minus3: + Fp::sqr(w, P.x); + if (isPzOne) { + w -= P.z; + } else { + Fp::sqr(t, P.z); + w -= t; + } + Fp::add(t, w, w); + w += t; + break; + case generic: + default: + if (isPzOne) { + w = a_; + } else { + Fp::sqr(w, P.z); + w *= a_; + } + Fp::sqr(t, P.x); + w += t; + w += t; + w += t; // w = a z^2 + 3x^2 + break; + } + if (isPzOne) { + R.z = P.y; + } else { + Fp::mul(R.z, P.y, P.z); // s = yz + } + Fp::mul(t, R.z, P.x); + t *= P.y; // xys + t += t; + t += t; // 4(xys) ; 4B + Fp::sqr(h, w); + h -= t; + h -= t; // w^2 - 8B + Fp::mul(R.x, h, R.z); + t -= h; // h is free + t *= w; + Fp::sqr(w, P.y); + R.x += R.x; + R.z += R.z; + Fp::sqr(h, R.z); + w *= h; + R.z *= h; + Fp::sub(R.y, t, w); + R.y -= w; + } +#endif + static inline void dblNoVerifyInf(EcT& R, const EcT& P) + { +#ifdef MCL_EC_USE_AFFINE + Fp t, s; + Fp::sqr(t, P.x); + Fp::add(s, t, t); + t += s; + t += a_; + Fp::add(s, P.y, P.y); + t /= s; + Fp::sqr(s, t); + s -= P.x; + Fp x3; 
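+		// affine doubling: t = (3*x^2 + a)/(2*y) is the tangent slope, x3 = t^2 - 2*x, y3 = t*(x - x3) - y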
+ Fp::sub(x3, s, P.x); + Fp::sub(s, P.x, x3); + s *= t; + Fp::sub(R.y, s, P.y); + R.x = x3; + R.inf_ = false; +#else + switch (mode_) { + case ec::Jacobi: + dblNoVerifyInfJacobi(R, P); + break; + case ec::Proj: + dblNoVerifyInfProj(R, P); + break; + } +#endif + } + static inline void dbl(EcT& R, const EcT& P) + { + if (P.isZero()) { + R.clear(); + return; + } + dblNoVerifyInf(R, P); + } +#ifndef MCL_EC_USE_AFFINE + static inline void addJacobi(EcT& R, const EcT& P, const EcT& Q) + { + const bool isQzOne = Q.z.isOne(); + Fp r, U1, S1, H, H3; + Fp::sqr(r, P.z); + if (isQzOne) { + U1 = P.x; + Fp::mul(H, Q.x, r); + H -= U1; + r *= P.z; + S1 = P.y; + } else { + Fp::sqr(S1, Q.z); + Fp::mul(U1, P.x, S1); + Fp::mul(H, Q.x, r); + H -= U1; + r *= P.z; + S1 *= Q.z; + S1 *= P.y; + } + r *= Q.y; + r -= S1; + if (H.isZero()) { + if (r.isZero()) { + dblNoVerifyInf(R, P); + } else { + R.clear(); + } + return; + } + if (isQzOne) { + Fp::mul(R.z, P.z, H); + } else { + Fp::mul(R.z, P.z, Q.z); + R.z *= H; + } + Fp::sqr(H3, H); // H^2 + Fp::sqr(R.y, r); // r^2 + U1 *= H3; // U1 H^2 + H3 *= H; // H^3 + R.y -= U1; + R.y -= U1; + Fp::sub(R.x, R.y, H3); + U1 -= R.x; + U1 *= r; + H3 *= S1; + Fp::sub(R.y, U1, H3); + } + static inline void addProj(EcT& R, const EcT& P, const EcT& Q) + { + const bool isQzOne = Q.z.isOne(); + Fp r, PyQz, v, A, vv; + if (isQzOne) { + r = P.x; + PyQz = P.y; + } else { + Fp::mul(r, P.x, Q.z); + Fp::mul(PyQz, P.y, Q.z); + } + Fp::mul(A, Q.y, P.z); + Fp::mul(v, Q.x, P.z); + v -= r; + if (v.isZero()) { + if (A == PyQz) { + dblNoVerifyInf(R, P); + } else { + R.clear(); + } + return; + } + Fp::sub(R.y, A, PyQz); + Fp::sqr(A, R.y); + Fp::sqr(vv, v); + r *= vv; + vv *= v; + if (isQzOne) { + R.z = P.z; + } else { + Fp::mul(R.z, P.z, Q.z); + } + A *= R.z; + R.z *= vv; + A -= vv; + vv *= PyQz; + A -= r; + A -= r; + Fp::mul(R.x, v, A); + r -= A; + R.y *= r; + R.y -= vv; + } +#endif + static inline void add(EcT& R, const EcT& P0, const EcT& Q0) + { + if (P0.isZero()) { R = Q0; return; } + if (Q0.isZero()) { R = P0; return; } + if (&P0 == &Q0) { + dblNoVerifyInf(R, P0); + return; + } +#ifdef MCL_EC_USE_AFFINE + const EcT& P(P0); + const EcT& Q(Q0); + Fp t; + Fp::neg(t, Q.y); + if (P.y == t) { R.clear(); return; } + Fp::sub(t, Q.x, P.x); + if (t.isZero()) { + dblNoVerifyInf(R, P); + return; + } + Fp s; + Fp::sub(s, Q.y, P.y); + Fp::div(t, s, t); + R.inf_ = false; + Fp x3; + Fp::sqr(x3, t); + x3 -= P.x; + x3 -= Q.x; + Fp::sub(s, P.x, x3); + s *= t; + Fp::sub(R.y, s, P.y); + R.x = x3; +#else + const EcT *pP = &P0; + const EcT *pQ = &Q0; + if (pP->z.isOne()) { + fp::swap_(pP, pQ); + } + const EcT& P(*pP); + const EcT& Q(*pQ); + switch (mode_) { + case ec::Jacobi: + addJacobi(R, P, Q); + break; + case ec::Proj: + addProj(R, P, Q); + break; + } +#endif + } + static inline void sub(EcT& R, const EcT& P, const EcT& Q) + { + EcT nQ; + neg(nQ, Q); + add(R, P, nQ); + } + static inline void neg(EcT& R, const EcT& P) + { + if (P.isZero()) { + R.clear(); + return; + } + R.x = P.x; + Fp::neg(R.y, P.y); +#ifdef MCL_EC_USE_AFFINE + R.inf_ = false; +#else + R.z = P.z; +#endif + } + template<class tag, size_t maxBitSize, template<class _tag, size_t _maxBitSize>class FpT> + static inline void mul(EcT& z, const EcT& x, const FpT<tag, maxBitSize>& y) + { + fp::Block b; + y.getBlock(b); + mulArray(z, x, b.p, b.n, false); + } + static inline void mul(EcT& z, const EcT& x, int64_t y) + { + const uint64_t u = fp::abs_(y); +#if MCL_SIZEOF_UNIT == 8 + mulArray(z, x, &u, 1, y < 0); +#else + uint32_t ua[2] = { uint32_t(u), 
uint32_t(u >> 32) }; + size_t un = ua[1] ? 2 : 1; + mulArray(z, x, ua, un, y < 0); +#endif + } + static inline void mul(EcT& z, const EcT& x, const mpz_class& y) + { + mulArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0); + } + template<class tag, size_t maxBitSize, template<class _tag, size_t _maxBitSize>class FpT> + static inline void mulCT(EcT& z, const EcT& x, const FpT<tag, maxBitSize>& y) + { + fp::Block b; + y.getBlock(b); + mulArray(z, x, b.p, b.n, false, true); + } + static inline void mulCT(EcT& z, const EcT& x, const mpz_class& y) + { + mulArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, true); + } + /* + 0 <= P for any P + (Px, Py) <= (P'x, P'y) iff Px < P'x or Px == P'x and Py <= P'y + @note compare function calls normalize() + */ + template<class F> + static inline int compareFunc(const EcT& P_, const EcT& Q_, F comp) + { + const bool QisZero = Q_.isZero(); + if (P_.isZero()) { + if (QisZero) return 0; + return -1; + } + if (QisZero) return 1; + EcT P(P_), Q(Q_); + P.normalize(); + Q.normalize(); + int c = comp(P.x, Q.x); + if (c > 0) return 1; + if (c < 0) return -1; + return comp(P.y, Q.y); + } + static inline int compare(const EcT& P, const EcT& Q) + { + return compareFunc(P, Q, Fp::compare); + } + static inline int compareRaw(const EcT& P, const EcT& Q) + { + return compareFunc(P, Q, Fp::compareRaw); + } + bool isZero() const + { +#ifdef MCL_EC_USE_AFFINE + return inf_; +#else + return z.isZero(); +#endif + } + static inline bool isMSBserialize() + { + return !b_.isZero() && (Fp::BaseFp::getBitSize() & 7) != 0; + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode) const + { + const char sep = *fp::getIoSeparator(ioMode); + if (ioMode & IoEcProj) { + cybozu::writeChar(pb, os, '4'); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + x.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + y.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } +#ifndef MCL_EC_USE_AFFINE + z.save(pb, os, ioMode); +#endif + return; + } + EcT P(*this); + P.normalize(); + if (ioMode & (IoSerialize | IoSerializeHexStr)) { + /* + if (isMSBserialize()) { + // n bytes + x | (y.isOdd ? 0x80 : 0) + } else { + // n + 1 bytes + (y.isOdd ? 3 : 2), x + } + */ + const size_t n = Fp::getByteSize(); + const size_t adj = isMSBserialize() ? 0 : 1; + char buf[sizeof(Fp) + 1]; + if (isZero()) { + memset(buf, 0, n + adj); + } else { + cybozu::MemoryOutputStream mos(buf + adj, n); + P.x.save(pb, mos, IoSerialize); if (!*pb) return; + if (adj) { + buf[0] = P.y.isOdd() ? 3 : 2; + } else { + if (P.y.isOdd()) { + buf[n - 1] |= 0x80; + } + } + } + if (ioMode & IoSerializeHexStr) { + mcl::fp::writeHexStr(pb, os, buf, n + adj); + } else { + cybozu::write(pb, os, buf, n + adj); + } + return; + } + if (isZero()) { + cybozu::writeChar(pb, os, '0'); + return; + } + if (ioMode & IoEcCompY) { + cybozu::writeChar(pb, os, P.y.isOdd() ? 
'3' : '2'); + if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + P.x.save(pb, os, ioMode); + } else { + cybozu::writeChar(pb, os, '1'); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + P.x.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + P.y.save(pb, os, ioMode); + } + } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode) + { +#ifdef MCL_EC_USE_AFFINE + inf_ = false; +#else + z = 1; +#endif + if (ioMode & (IoSerialize | IoSerializeHexStr)) { + const size_t n = Fp::getByteSize(); + const size_t adj = isMSBserialize() ? 0 : 1; + const size_t n1 = n + adj; + char buf[sizeof(Fp) + 1]; + size_t readSize; + if (ioMode & IoSerializeHexStr) { + readSize = mcl::fp::readHexStr(buf, n1, is); + } else { + readSize = cybozu::readSome(buf, n1, is); + } + if (readSize != n1) { + *pb = false; + return; + } + if (fp::isZeroArray(buf, n1)) { + clear(); + *pb = true; + return; + } + bool isYodd; + if (adj) { + char c = buf[0]; + if (c != 2 && c != 3) { + *pb = false; + return; + } + isYodd = c == 3; + } else { + isYodd = (buf[n - 1] >> 7) != 0; + buf[n - 1] &= 0x7f; + } + x.setArray(pb, buf + adj, n); + if (!*pb) return; + *pb = getYfromX(y, x, isYodd); + if (!*pb) return; + } else { + char c = 0; + if (!fp::local::skipSpace(&c, is)) { + *pb = false; + return; + } + if (c == '0') { + clear(); + *pb = true; + return; + } + x.load(pb, is, ioMode); if (!*pb) return; + if (c == '1') { + y.load(pb, is, ioMode); if (!*pb) return; + if (!isValid(x, y)) { + *pb = false; + return; + } + } else if (c == '2' || c == '3') { + bool isYodd = c == '3'; + *pb = getYfromX(y, x, isYodd); + if (!*pb) return; + } else if (c == '4') { + y.load(pb, is, ioMode); if (!*pb) return; +#ifndef MCL_EC_USE_AFFINE + z.load(pb, is, ioMode); if (!*pb) return; +#endif + } else { + *pb = false; + return; + } + } + if (verifyOrder_ && !isValidOrder()) { + *pb = false; + } else { + *pb = true; + } + } + // deplicated + static void setCompressedExpression(bool compressedExpression = true) + { + if (compressedExpression) { + ioMode_ |= IoEcCompY; + } else { + ioMode_ &= ~IoEcCompY; + } + } + /* + set IoMode for operator<<(), or operator>>() + */ + static void setIoMode(int ioMode) + { + assert(!(ioMode & 0xff)); + ioMode_ = ioMode; + } + static inline int getIoMode() { return Fp::BaseFp::getIoMode() | ioMode_; } + static inline void getWeierstrass(Fp& yy, const Fp& x) + { + Fp t; + Fp::sqr(t, x); + t += a_; + t *= x; + Fp::add(yy, t, b_); + } + static inline bool getYfromX(Fp& y, const Fp& x, bool isYodd) + { + getWeierstrass(y, x); + if (!Fp::squareRoot(y, y)) { + return false; + } + if (y.isOdd() ^ isYodd) { + Fp::neg(y, y); + } + return true; + } + inline friend EcT operator+(const EcT& x, const EcT& y) { EcT z; add(z, x, y); return z; } + inline friend EcT operator-(const EcT& x, const EcT& y) { EcT z; sub(z, x, y); return z; } + template<class INT> + inline friend EcT operator*(const EcT& x, const INT& y) { EcT z; mul(z, x, y); return z; } + EcT& operator+=(const EcT& x) { add(*this, *this, x); return *this; } + EcT& operator-=(const EcT& x) { sub(*this, *this, x); return *this; } + template<class INT> + EcT& operator*=(const INT& x) { mul(*this, *this, x); return *this; } + EcT operator-() const { EcT x; neg(x, *this); return x; } + bool operator==(const EcT& rhs) const + { + EcT R; + sub(R, *this, rhs); // QQQ : optimized later + return R.isZero(); + } + bool 
operator!=(const EcT& rhs) const { return !operator==(rhs); } + bool operator<(const EcT& rhs) const + { + return compare(*this, rhs) < 0; + } + bool operator>=(const EcT& rhs) const { return !operator<(rhs); } + bool operator>(const EcT& rhs) const { return rhs < *this; } + bool operator<=(const EcT& rhs) const { return !operator>(rhs); } + static inline void mulArray(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime = false) + { + if (mulArrayGLV && (constTime || yn > 1)) { + mulArrayGLV(z, x, y, yn, isNegative, constTime); + return; + } + mulArrayBase(z, x, y, yn, isNegative, constTime); + } + static inline void mulArrayBase(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime) + { + EcT tmp; + const EcT *px = &x; + if (&z == &x) { + tmp = x; + px = &tmp; + } + z.clear(); + fp::powGeneric(z, *px, y, yn, EcT::add, EcT::dbl, EcT::normalize, constTime ? Fp::BaseFp::getBitSize() : 0); + if (isNegative) { + neg(z, z); + } + } + /* + generic mul + */ + static inline void mulGeneric(EcT& z, const EcT& x, const mpz_class& y, bool constTime = false) + { + mulArrayBase(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, constTime); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + static inline void init(const std::string& astr, const std::string& bstr, int mode = ec::Jacobi) + { + bool b; + init(&b, astr.c_str(), bstr.c_str(), mode); + if (!b) throw cybozu::Exception("mcl:EcT:init"); + } + void set(const Fp& _x, const Fp& _y, bool verify = true) + { + bool b; + set(&b, _x, _y, verify); + if (!b) throw cybozu::Exception("ec:EcT:set") << _x << _y; + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("EcT:save"); + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("EcT:load"); + } +#endif +#ifndef CYBOZU_DONT_USE_STRING + // backward compatilibity + static inline void setParam(const std::string& astr, const std::string& bstr, int mode = ec::Jacobi) + { + init(astr, bstr, mode); + } + friend inline std::istream& operator>>(std::istream& is, EcT& self) + { + self.load(is, fp::detectIoMode(getIoMode(), is)); + return is; + } + friend inline std::ostream& operator<<(std::ostream& os, const EcT& self) + { + self.save(os, fp::detectIoMode(getIoMode(), os)); + return os; + } +#endif +}; + +template<class Fp> Fp EcT<Fp>::a_; +template<class Fp> Fp EcT<Fp>::b_; +template<class Fp> int EcT<Fp>::specialA_; +template<class Fp> int EcT<Fp>::ioMode_; +template<class Fp> bool EcT<Fp>::verifyOrder_; +template<class Fp> mpz_class EcT<Fp>::order_; +template<class Fp> void (*EcT<Fp>::mulArrayGLV)(EcT& z, const EcT& x, const fp::Unit *y, size_t yn, bool isNegative, bool constTime); +#ifndef MCL_EC_USE_AFFINE +template<class Fp> int EcT<Fp>::mode_; +#endif + +struct EcParam { + const char *name; + const char *p; + const char *a; + const char *b; + const char *gx; + const char *gy; + const char *n; + size_t bitSize; // bit length of p +}; + +} // mcl + +#ifdef CYBOZU_USE_BOOST +namespace mcl { +template<class Fp> +size_t hash_value(const mcl::EcT<Fp>& P_) +{ + if (P_.isZero()) return 0; + mcl::EcT<Fp> P(P_); P.normalize(); + return mcl::hash_value(P.y, mcl::hash_value(P.x)); +} + +} +#else +namespace std { CYBOZU_NAMESPACE_TR1_BEGIN + +template<class Fp> +struct hash<mcl::EcT<Fp> > { + size_t operator()(const mcl::EcT<Fp>& P_) const + { + if 
(P_.isZero()) return 0; + mcl::EcT<Fp> P(P_); P.normalize(); + return hash<Fp>()(P.y, hash<Fp>()(P.x)); + } +}; + +CYBOZU_NAMESPACE_TR1_END } // std +#endif + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.h b/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.h new file mode 100644 index 000000000..daeb6be53 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.h @@ -0,0 +1,105 @@ +#pragma once +/** + @file + @brief C interface of ECDSA + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <stdint.h> // for uint64_t, uint8_t +#include <stdlib.h> // for size_t + +#if defined(_MSC_VER) + #ifdef ECDSA_DLL_EXPORT + #define ECDSA_DLL_API __declspec(dllexport) + #else + #define ECDSA_DLL_API __declspec(dllimport) + #ifndef ECDSA_NO_AUTOLINK + #pragma comment(lib, "mclecdsa.lib") + #endif + #endif +#elif defined(__EMSCRIPTEN__) + #define ECDSA_DLL_API __attribute__((used)) +#else + #define ECDSA_DLL_API +#endif + +#ifndef mclSize + #ifdef __EMSCRIPTEN__ + // avoid 64-bit integer + #define mclSize unsigned int + #define mclInt int + #else + // use #define for cgo + #define mclSize size_t + #define mclInt int64_t + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef ECDSA_NOT_DEFINE_STRUCT + +typedef struct ecdsaSecretKey ecdsaSecretKey; +typedef struct ecdsaPublicKey ecdsaPublicKey; +typedef struct ecdsaSignature ecdsaSignature; + +#else + +typedef struct { + uint64_t d[4]; +} ecdsaSecretKey; + +typedef struct { + uint64_t d[4 * 3]; +} ecdsaPublicKey; + +typedef struct { + uint64_t d[4 * 2]; +} ecdsaSignature; + +#endif + +struct ecdsaPrecomputedPublicKey; + +/* + init library + return 0 if success + @note not threadsafe +*/ +ECDSA_DLL_API int ecdsaInit(void); + +// return written byte size if success else 0 +ECDSA_DLL_API mclSize ecdsaSecretKeySerialize(void *buf, mclSize maxBufSize, const ecdsaSecretKey *sec); +ECDSA_DLL_API mclSize ecdsaPublicKeySerialize(void *buf, mclSize maxBufSize, const ecdsaPublicKey *pub); +ECDSA_DLL_API mclSize ecdsaSignatureSerialize(void *buf, mclSize maxBufSize, const ecdsaSignature *sig); + +// return read byte size if sucess else 0 +ECDSA_DLL_API mclSize ecdsaSecretKeyDeserialize(ecdsaSecretKey* sec, const void *buf, mclSize bufSize); +ECDSA_DLL_API mclSize ecdsaPublicKeyDeserialize(ecdsaPublicKey* pub, const void *buf, mclSize bufSize); +ECDSA_DLL_API mclSize ecdsaSignatureDeserialize(ecdsaSignature* sig, const void *buf, mclSize bufSize); + +// return 0 if success +ECDSA_DLL_API int ecdsaSecretKeySetByCSPRNG(ecdsaSecretKey *sec); + +ECDSA_DLL_API void ecdsaGetPublicKey(ecdsaPublicKey *pub, const ecdsaSecretKey *sec); + +ECDSA_DLL_API void ecdsaSign(ecdsaSignature *sig, const ecdsaSecretKey *sec, const void *m, mclSize size); + +// return 1 if valid +ECDSA_DLL_API int ecdsaVerify(const ecdsaSignature *sig, const ecdsaPublicKey *pub, const void *m, mclSize size); +ECDSA_DLL_API int ecdsaVerifyPrecomputed(const ecdsaSignature *sig, const ecdsaPrecomputedPublicKey *pub, const void *m, mclSize size); + +// return nonzero if success +ECDSA_DLL_API ecdsaPrecomputedPublicKey *ecdsaPrecomputedPublicKeyCreate(); +// call this function to avoid memory leak +ECDSA_DLL_API void ecdsaPrecomputedPublicKeyDestroy(ecdsaPrecomputedPublicKey *ppub); +// return 0 if success +ECDSA_DLL_API int ecdsaPrecomputedPublicKeyInit(ecdsaPrecomputedPublicKey *ppub, const ecdsaPublicKey *pub); + 
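The declarations above are the whole public surface of the C binding, so a short end-to-end sketch fits here. This is an illustration only: it assumes the header is installed as <mcl/ecdsa.h> and that the program links against the corresponding mcl/ecdsa library; it uses nothing beyond the functions declared above and the return conventions stated in their comments (0 for success, 1 for a valid signature).

#include <stdio.h>
#include <string.h>
#include <mcl/ecdsa.h> // assumed install location of the header shown above

int main()
{
	if (ecdsaInit() != 0) return 1;                      // 0 means success
	ecdsaSecretKey sec;
	ecdsaPublicKey pub;
	if (ecdsaSecretKeySetByCSPRNG(&sec) != 0) return 1;  // 0 means success
	ecdsaGetPublicKey(&pub, &sec);

	const char *msg = "hello";
	const size_t msgSize = strlen(msg);
	ecdsaSignature sig;
	ecdsaSign(&sig, &sec, msg, msgSize);
	printf("verify=%d\n", ecdsaVerify(&sig, &pub, msg, msgSize)); // 1 if valid

	// optional: precompute on the public key to speed up repeated verification
	ecdsaPrecomputedPublicKey *ppub = ecdsaPrecomputedPublicKeyCreate();
	if (ppub != NULL && ecdsaPrecomputedPublicKeyInit(ppub, &pub) == 0) {
		printf("verify(precomputed)=%d\n", ecdsaVerifyPrecomputed(&sig, ppub, msg, msgSize));
	}
	if (ppub != NULL) ecdsaPrecomputedPublicKeyDestroy(ppub); // avoid the leak noted above
	return 0;
}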
+#ifdef __cplusplus +} +#endif + diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.hpp new file mode 100644 index 000000000..cf3ed3f65 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/ecdsa.hpp @@ -0,0 +1,257 @@ +#pragma once +/** + @file + @brief ECDSA + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <mcl/fp.hpp> +#include <mcl/ec.hpp> +#include <mcl/ecparam.hpp> +#include <mcl/window_method.hpp> + +namespace mcl { namespace ecdsa { + +namespace local { + +#ifndef MCLSHE_WIN_SIZE + #define MCLSHE_WIN_SIZE 10 +#endif +static const size_t winSize = MCLSHE_WIN_SIZE; + +struct FpTag; +struct ZnTag; + +} // mcl::ecdsa::local + +typedef mcl::FpT<local::FpTag, 256> Fp; +typedef mcl::FpT<local::ZnTag, 256> Zn; +typedef mcl::EcT<Fp> Ec; + +namespace local { + +struct Param { + mcl::EcParam ecParam; + Ec P; + mcl::fp::WindowMethod<Ec> Pbase; +}; + +inline Param& getParam() +{ + static Param p; + return p; +} + +inline void be32toZn(Zn& x, const mcl::fp::Unit *buf) +{ + const size_t n = 32; + const unsigned char *p = (const unsigned char*)buf; + unsigned char be[n]; + for (size_t i = 0; i < n; i++) { + be[i] = p[n - 1 - i]; + } + x.setArrayMaskMod(be, n); +} + +/* + y = x mod n +*/ +inline void FpToZn(Zn& y, const Fp& x) +{ + fp::Block b; + x.getBlock(b); + y.setArrayMaskMod(b.p, b.n); +} + +inline void setHashOf(Zn& x, const void *msg, size_t msgSize) +{ + mcl::fp::Unit xBuf[256 / 8 / sizeof(mcl::fp::Unit)]; + uint32_t hashSize = mcl::fp::sha256(xBuf, sizeof(xBuf), msg, (uint32_t)msgSize); + assert(hashSize == sizeof(xBuf)); + (void)hashSize; + be32toZn(x, xBuf); +} + +} // mcl::ecdsa::local + +const local::Param& param = local::getParam(); + +inline void init(bool *pb) +{ + const mcl::EcParam& ecParam = mcl::ecparam::secp256k1; + Zn::init(pb, ecParam.n); + if (!*pb) return; + Fp::init(pb, ecParam.p); + if (!*pb) return; + Ec::init(pb, ecParam.a, ecParam.b); + if (!*pb) return; + Zn::setIoMode(16); + Fp::setIoMode(16); + Ec::setIoMode(mcl::IoEcAffine); + local::Param& p = local::getParam(); + p.ecParam = ecParam; + Fp x, y; + x.setStr(pb, ecParam.gx); + if (!*pb) return; + y.setStr(pb, ecParam.gy); + if (!*pb) return; + p.P.set(pb, x, y); + if (!*pb) return; + p.Pbase.init(pb, p.P, ecParam.bitSize, local::winSize); +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void init() +{ + bool b; + init(&b); + if (!b) throw cybozu::Exception("ecdsa:init"); +} +#endif + +typedef Zn SecretKey; +typedef Ec PublicKey; + +struct PrecomputedPublicKey { + mcl::fp::WindowMethod<Ec> pubBase_; + void init(bool *pb, const PublicKey& pub) + { + pubBase_.init(pb, pub, param.ecParam.bitSize, local::winSize); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void init(const PublicKey& pub) + { + bool b; + init(&b, pub); + if (!b) throw cybozu::Exception("ecdsa:PrecomputedPublicKey:init"); + } +#endif +}; + +inline void getPublicKey(PublicKey& pub, const SecretKey& sec) +{ + Ec::mul(pub, param.P, sec); + pub.normalize(); +} + +struct Signature : public mcl::fp::Serializable<Signature> { + Zn r, s; + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + r.load(pb, is, ioMode); if (!*pb) return; + s.load(pb, is, ioMode); + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + r.save(pb, os, ioMode); if 
(!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + s.save(pb, os, ioMode); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("ecdsa:Signature:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("ecdsa:Signature:save"); + } +#endif +#ifndef CYBOZU_DONT_USE_STRING + friend std::istream& operator>>(std::istream& is, Signature& self) + { + self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const Signature& self) + { + self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); + return os; + } +#endif +}; + +inline void sign(Signature& sig, const SecretKey& sec, const void *msg, size_t msgSize) +{ + Zn& r = sig.r; + Zn& s = sig.s; + Zn z, k; + local::setHashOf(z, msg, msgSize); + Ec Q; + for (;;) { + k.setByCSPRNG(); + param.Pbase.mul(Q, k); + if (Q.isZero()) continue; + Q.normalize(); + local::FpToZn(r, Q.x); + if (r.isZero()) continue; + Zn::mul(s, r, sec); + s += z; + if (s.isZero()) continue; + s /= k; + return; + } +} + +namespace local { + +inline void mulDispatch(Ec& Q, const PublicKey& pub, const Zn& y) +{ + Ec::mul(Q, pub, y); +} + +inline void mulDispatch(Ec& Q, const PrecomputedPublicKey& ppub, const Zn& y) +{ + ppub.pubBase_.mul(Q, y); +} + +template<class Pub> +inline bool verify(const Signature& sig, const Pub& pub, const void *msg, size_t msgSize) +{ + const Zn& r = sig.r; + const Zn& s = sig.s; + if (r.isZero() || s.isZero()) return false; + Zn z, w, u1, u2; + local::setHashOf(z, msg, msgSize); + Zn::inv(w, s); + Zn::mul(u1, z, w); + Zn::mul(u2, r, w); + Ec Q1, Q2; + param.Pbase.mul(Q1, u1); +// Ec::mul(Q2, pub, u2); + local::mulDispatch(Q2, pub, u2); + Q1 += Q2; + if (Q1.isZero()) return false; + Q1.normalize(); + Zn x; + local::FpToZn(x, Q1.x); + return r == x; +} + +} // mcl::ecdsa::local + +inline bool verify(const Signature& sig, const PublicKey& pub, const void *msg, size_t msgSize) +{ + return local::verify(sig, pub, msg, msgSize); +} + +inline bool verify(const Signature& sig, const PrecomputedPublicKey& ppub, const void *msg, size_t msgSize) +{ + return local::verify(sig, ppub, msg, msgSize); +} + +} } // mcl::ecdsa + diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/ecparam.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/ecparam.hpp new file mode 100644 index 000000000..19b76bf55 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/ecparam.hpp @@ -0,0 +1,164 @@ +#pragma once +/** + @file + @brief Elliptic curve parameter + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <mcl/ec.hpp> + +namespace mcl { namespace ecparam { + +const struct mcl::EcParam secp160k1 = { + "secp160k1", + "0xfffffffffffffffffffffffffffffffeffffac73", + "0", + "7", + "0x3b4c382ce37aa192a4019e763036f4f5dd4d7ebb", + "0x938cf935318fdced6bc28286531733c3f03c4fee", + "0x100000000000000000001b8fa16dfab9aca16b6b3", + 160 +}; +// p=2^160 + 7 +const struct mcl::EcParam p160_1 = { + "p160_1", + "0x10000000000000000000000000000000000000007", + "10", + "1343632762150092499701637438970764818528075565078", + "1", + "1236612389951462151661156731535316138439983579284", + "1461501637330902918203683518218126812711137002561", 
+ 161 +}; +const struct mcl::EcParam secp192k1 = { + "secp192k1", + "0xfffffffffffffffffffffffffffffffffffffffeffffee37", + "0", + "3", + "0xdb4ff10ec057e9ae26b07d0280b7f4341da5d1b1eae06c7d", + "0x9b2f2f6d9c5628a7844163d015be86344082aa88d95e2f9d", + "0xfffffffffffffffffffffffe26f2fc170f69466a74defd8d", + 192 +}; +const struct mcl::EcParam secp224k1 = { + "secp224k1", + "0xfffffffffffffffffffffffffffffffffffffffffffffffeffffe56d", + "0", + "5", + "0xa1455b334df099df30fc28a169a467e9e47075a90f7e650eb6b7a45c", + "0x7e089fed7fba344282cafbd6f7e319f7c0b0bd59e2ca4bdb556d61a5", + "0x10000000000000000000000000001dce8d2ec6184caf0a971769fb1f7", + 224 +}; +const struct mcl::EcParam secp256k1 = { + "secp256k1", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "0", + "7", + "0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", + "0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", + "0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + 256 +}; +const struct mcl::EcParam secp384r1 = { + "secp384r1", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", + "-3", + "0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef", + "0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7", + "0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f", + "0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973", + 384 +}; +const struct mcl::EcParam secp521r1 = { + "secp521r1", + "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "-3", + "0x51953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00", + "0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66", + "0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650", + "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409", + 521 +}; +const struct mcl::EcParam NIST_P192 = { + "NIST_P192", + "0xfffffffffffffffffffffffffffffffeffffffffffffffff", + "-3", + "0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1", + "0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012", + "0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811", + "0xffffffffffffffffffffffff99def836146bc9b1b4d22831", + 192 +}; +const struct mcl::EcParam NIST_P224 = { + "NIST_P224", + "0xffffffffffffffffffffffffffffffff000000000000000000000001", + "-3", + "0xb4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4", + "0xb70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", + "0xbd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34", + "0xffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", + 224 +}; +const struct mcl::EcParam NIST_P256 = { + "NIST_P256", + "0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff", + "-3", + "0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", + "0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", + "0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", + "0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551", + 
256 +}; +// same secp384r1 +const struct mcl::EcParam NIST_P384 = { + "NIST_P384", + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", + "-3", + "0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef", + "0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7", + "0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f", + "0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973", + 384 +}; +// same secp521r1 +const struct mcl::EcParam NIST_P521 = { + "NIST_P521", + "0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "-3", + "0x051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00", + "0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66", + "0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650", + "0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409", + 521 +}; + +} // mcl::ecparam + +#ifndef CYBOZU_DONT_USE_STRING +static inline const mcl::EcParam* getEcParam(const std::string& name) +{ + static const mcl::EcParam *tbl[] = { + &ecparam::p160_1, + &ecparam::secp160k1, + &ecparam::secp192k1, + &ecparam::secp224k1, + &ecparam::secp256k1, + &ecparam::secp384r1, + &ecparam::secp521r1, + + &ecparam::NIST_P192, + &ecparam::NIST_P224, + &ecparam::NIST_P256, + &ecparam::NIST_P384, + &ecparam::NIST_P521, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + if (name == tbl[i]->name) return tbl[i]; + } + throw cybozu::Exception("mcl::getEcParam:not support name") << name; +} +#endif + +} // mcl diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/elgamal.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/elgamal.hpp new file mode 100644 index 000000000..8bc3104b7 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/elgamal.hpp @@ -0,0 +1,620 @@ +#pragma once +/** + @file + @brief lifted-ElGamal encryption + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause + + original: + Copyright (c) 2014, National Institute of Advanced Industrial + Science and Technology All rights reserved. + This source file is subject to BSD 3-Clause license. 
+*/ +#include <string> +#include <sstream> +#include <cybozu/unordered_map.hpp> +#ifndef CYBOZU_UNORDERED_MAP_STD +#include <map> +#endif +#include <cybozu/exception.hpp> +#include <cybozu/itoa.hpp> +#include <cybozu/atoi.hpp> +#include <mcl/window_method.hpp> + +namespace mcl { + +template<class _Ec, class Zn> +struct ElgamalT { + typedef _Ec Ec; + struct CipherText { + Ec c1; + Ec c2; + CipherText() + { + clear(); + } + /* + (c1, c2) = (0, 0) is trivial valid ciphertext for m = 0 + */ + void clear() + { + c1.clear(); + c2.clear(); + } + /* + add encoded message with encoded message + input : this = Enc(m1), c = Enc(m2) + output : this = Enc(m1 + m2) + */ + void add(const CipherText& c) + { + Ec::add(c1, c1, c.c1); + Ec::add(c2, c2, c.c2); + } + /* + mul by x + input : this = Enc(m), x + output : this = Enc(m x) + */ + template<class N> + void mul(const N& x) + { + Ec::mul(c1, c1, x); + Ec::mul(c2, c2, x); + } + /* + negative encoded message + input : this = Enc(m) + output : this = Enc(-m) + */ + void neg() + { + Ec::neg(c1, c1); + Ec::neg(c2, c2); + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + c1.load(is, ioMode); + c2.load(is, ioMode); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + c1.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + c2.save(os, ioMode); + } + void getStr(std::string& str, int ioMode = 0) const + { + str.clear(); + cybozu::StringOutputStream os(str); + save(os, ioMode); + } + std::string getStr(int ioMode = 0) const + { + std::string str; + getStr(str, ioMode); + return str; + } + void setStr(const std::string& str, int ioMode = 0) + { + cybozu::StringInputStream is(str); + load(is, ioMode); + } + friend inline std::ostream& operator<<(std::ostream& os, const CipherText& self) + { + self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); + return os; + } + friend inline std::istream& operator>>(std::istream& is, CipherText& self) + { + self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); + return is; + } + // obsolete + std::string toStr() const { return getStr(); } + void fromStr(const std::string& str) { setStr(str); } + }; + /* + Zero Knowledge Proof + cipher text with ZKP to ensure m = 0 or 1 + http://dx.doi.org/10.1587/transfun.E96.A.1156 + */ + struct Zkp { + Zn c0, c1, s0, s1; + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + c0.load(is, ioMode); + c1.load(is, ioMode); + s0.load(is, ioMode); + s1.load(is, ioMode); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + c0.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + c1.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + s0.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + s1.save(os, ioMode); + } + void getStr(std::string& str, int ioMode = 0) const + { + str.clear(); + cybozu::StringOutputStream os(str); + save(os, ioMode); + } + std::string getStr(int ioMode = 0) const + { + std::string str; + getStr(str, ioMode); + return str; + } + void setStr(const std::string& str, int ioMode = 0) + { + cybozu::StringInputStream is(str); + load(is, ioMode); + } + friend inline std::ostream& operator<<(std::ostream& os, const Zkp& self) + { + self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); + return os; + } + friend inline std::istream& operator>>(std::istream& is, Zkp& self) + { + self.load(is, 
fp::detectIoMode(Ec::getIoMode(), is)); + return is; + } + // obsolete + std::string toStr() const { return getStr(); } + void fromStr(const std::string& str) { setStr(str); } + }; + + class PublicKey { + size_t bitSize; + Ec f; + Ec g; + Ec h; + bool enableWindowMethod_; + fp::WindowMethod<Ec> wm_f; + fp::WindowMethod<Ec> wm_g; + fp::WindowMethod<Ec> wm_h; + template<class N> + void mulDispatch(Ec& z, const Ec& x, const N& n, const fp::WindowMethod<Ec>& pw) const + { + if (enableWindowMethod_) { + pw.mul(z, n); + } else { + Ec::mul(z, x, n); + } + } + template<class N> + void mulF(Ec& z, const N& n) const { mulDispatch(z, f, n, wm_f); } + template<class N> + void mulG(Ec& z, const N& n) const { mulDispatch(z, g, n, wm_g); } + template<class N> + void mulH(Ec& z, const N& n) const { mulDispatch(z, h, n, wm_h); } + public: + PublicKey() + : bitSize(0) + , enableWindowMethod_(false) + { + } + void enableWindowMethod(size_t winSize = 10) + { + wm_f.init(f, bitSize, winSize); + wm_g.init(g, bitSize, winSize); + wm_h.init(h, bitSize, winSize); + enableWindowMethod_ = true; + } + const Ec& getF() const { return f; } + void init(size_t bitSize, const Ec& f, const Ec& g, const Ec& h) + { + this->bitSize = bitSize; + this->f = f; + this->g = g; + this->h = h; + enableWindowMethod_ = false; + enableWindowMethod(); + } + /* + encode message + input : m + output : c = (c1, c2) = (g^u, h^u f^m) + */ + void enc(CipherText& c, const Zn& m, fp::RandGen rg = fp::RandGen()) const + { + Zn u; + u.setRand(rg); + mulG(c.c1, u); + mulH(c.c2, u); + Ec t; + mulF(t, m); + Ec::add(c.c2, c.c2, t); + } + /* + encode message + input : m = 0 or 1 + output : c (c1, c2), zkp + */ + template<class Hash> + void encWithZkp(CipherText& c, Zkp& zkp, int m, Hash& hash, fp::RandGen rg = fp::RandGen()) const + { + if (m != 0 && m != 1) { + throw cybozu::Exception("elgamal:PublicKey:encWithZkp") << m; + } + Zn u; + u.setRand(rg); + mulG(c.c1, u); + mulH(c.c2, u); + if (m) { + Ec::add(c.c2, c.c2, f); + Zn r1; + r1.setRand(rg); + zkp.c0.setRand(rg); + zkp.s0.setRand(rg); + Ec R01, R02, R11, R12; + Ec t1, t2; + mulG(t1, zkp.s0); + Ec::mul(t2, c.c1, zkp.c0); + Ec::sub(R01, t1, t2); + mulH(t1, zkp.s0); + Ec::mul(t2, c.c2, zkp.c0); + Ec::sub(R02, t1, t2); + mulG(R11, r1); + mulH(R12, r1); + std::ostringstream os; + os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h; + hash.update(os.str()); + const std::string digest = hash.digest(); + Zn cc; + cc.setArrayMask(digest.c_str(), digest.size()); + zkp.c1 = cc - zkp.c0; + zkp.s1 = r1 + zkp.c1 * u; + } else { + Zn r0; + r0.setRand(rg); + zkp.c1.setRand(rg); + zkp.s1.setRand(rg); + Ec R01, R02, R11, R12; + mulG(R01, r0); + mulH(R02, r0); + Ec t1, t2; + mulG(t1, zkp.s1); + Ec::mul(t2, c.c1, zkp.c1); + Ec::sub(R11, t1, t2); + mulH(t1, zkp.s1); + Ec::sub(t2, c.c2, f); + Ec::mul(t2, t2, zkp.c1); + Ec::sub(R12, t1, t2); + std::ostringstream os; + os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h; + hash.update(os.str()); + const std::string digest = hash.digest(); + Zn cc; + cc.setArrayMask(digest.c_str(), digest.size()); + zkp.c0 = cc - zkp.c1; + zkp.s0 = r0 + zkp.c0 * u; + } + } + /* + verify cipher text with ZKP + */ + template<class Hash> + bool verify(const CipherText& c, const Zkp& zkp, Hash& hash) const + { + Ec R01, R02, R11, R12; + Ec t1, t2; + mulG(t1, zkp.s0); + Ec::mul(t2, c.c1, zkp.c0); + Ec::sub(R01, t1, t2); + mulH(t1, zkp.s0); + Ec::mul(t2, c.c2, zkp.c0); + Ec::sub(R02, t1, t2); + mulG(t1, zkp.s1); + Ec::mul(t2, c.c1, zkp.c1); + Ec::sub(R11, t1, t2); + 
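+		// R12 = s1*h - c1*(c2 - f) (additive notation): the m = 1 branch of the OR-proof; R01/R02 above cover the m = 0 branch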
mulH(t1, zkp.s1); + Ec::sub(t2, c.c2, f); + Ec::mul(t2, t2, zkp.c1); + Ec::sub(R12, t1, t2); + std::ostringstream os; + os << R01 << R02 << R11 << R12 << c.c1 << c.c2 << f << g << h; + hash.update(os.str()); + const std::string digest = hash.digest(); + Zn cc; + cc.setArrayMask(digest.c_str(), digest.size()); + return cc == zkp.c0 + zkp.c1; + } + /* + rerandomize encoded message + input : c = (c1, c2) + output : c = (c1 g^v, c2 h^v) + */ + void rerandomize(CipherText& c, fp::RandGen rg = fp::RandGen()) const + { + Zn v; + v.setRand(rg); + Ec t; + mulG(t, v); + Ec::add(c.c1, c.c1, t); + mulH(t, v); + Ec::add(c.c2, c.c2, t); + } + /* + add encoded message with plain message + input : c = Enc(m1) = (c1, c2), m2 + ouput : c = Enc(m1 + m2) = (c1, c2 f^m2) + */ + template<class N> + void add(CipherText& c, const N& m) const + { + Ec fm; + mulF(fm, m); + Ec::add(c.c2, c.c2, fm); + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + std::string s; + mcl::fp::local::loadWord(s, is); + bitSize = cybozu::atoi(s); + f.load(is, ioMode); + g.load(is, ioMode); + h.load(is, ioMode); + init(bitSize, f, g, h); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + std::string s = cybozu::itoa(bitSize); + cybozu::write(os, s.c_str(), s.size()); + cybozu::writeChar(os, ' '); + + const char sep = *fp::getIoSeparator(ioMode); + f.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + g.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + h.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + } + void getStr(std::string& str, int ioMode = 0) const + { + str.clear(); + cybozu::StringOutputStream os(str); + save(os, ioMode); + } + std::string getStr(int ioMode = 0) const + { + std::string str; + getStr(str, ioMode); + return str; + } + void setStr(const std::string& str, int ioMode = 0) + { + cybozu::StringInputStream is(str); + load(is, ioMode); + } + friend inline std::ostream& operator<<(std::ostream& os, const PublicKey& self) + { + self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); + return os; + } + friend inline std::istream& operator>>(std::istream& is, PublicKey& self) + { + self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); + return is; + } + // obsolete + std::string toStr() const { return getStr(); } + void fromStr(const std::string& str) { setStr(str); } + }; + /* + create table f^i for i in [rangeMin, rangeMax] + */ + struct PowerCache { +#if (CYBOZU_CPP_VERSION > CYBOZU_CPP_VERSION_CP03) + typedef CYBOZU_NAMESPACE_STD::unordered_map<Ec, int> Cache; +#else + typedef std::map<Ec, int> Cache; +#endif + Cache cache; + void init(const Ec& f, int rangeMin, int rangeMax) + { + if (rangeMin > rangeMax) throw cybozu::Exception("mcl:ElgamalT:PowerCache:bad range") << rangeMin << rangeMax; + Ec x; + x.clear(); + cache[x] = 0; + for (int i = 1; i <= rangeMax; i++) { + Ec::add(x, x, f); + cache[x] = i; + } + Ec nf; + Ec::neg(nf, f); + x.clear(); + for (int i = -1; i >= rangeMin; i--) { + Ec::add(x, x, nf); + cache[x] = i; + } + } + /* + return m such that f^m = g + */ + int getExponent(const Ec& g, bool *b = 0) const + { + typename Cache::const_iterator i = cache.find(g); + if (i == cache.end()) { + if (b) { + *b = false; + return 0; + } + throw cybozu::Exception("Elgamal:PowerCache:getExponent:not found") << g; + } + if (b) *b = true; + return i->second; + } + void clear() + { + cache.clear(); + } + bool isEmpty() const + { + return cache.empty(); + } + }; + class PrivateKey { + PublicKey pub; + Zn z; 
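+		// optional table of f^m (built by setCache) used by the table-lookup dec() overload below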
+ PowerCache cache; + public: + /* + init + input : f + output : (g, h, z) + Ec = <f> + g in Ec + h = g^z + */ + void init(const Ec& f, size_t bitSize, fp::RandGen rg = fp::RandGen()) + { + Ec g, h; + z.setRand(rg); + Ec::mul(g, f, z); + z.setRand(rg); + Ec::mul(h, g, z); + pub.init(bitSize, f, g, h); + } + const PublicKey& getPublicKey() const { return pub; } + /* + decode message by brute-force attack + input : c = (c1, c2) + output : m + M = c2 / c1^z + find m such that M = f^m and |m| < limit + @memo 7sec@core i3 for m = 1e6 + */ + void dec(Zn& m, const CipherText& c, int limit = 100000) const + { + const Ec& f = pub.getF(); + Ec c1z; + Ec::mul(c1z, c.c1, z); + if (c1z == c.c2) { + m = 0; + return; + } + Ec t1(c1z); + Ec t2(c.c2); + for (int i = 1; i < limit; i++) { + Ec::add(t1, t1, f); + if (t1 == c.c2) { + m = i; + return; + } + Ec::add(t2, t2, f); + if (t2 == c1z) { + m = -i; + return; + } + } + throw cybozu::Exception("elgamal:PrivateKey:dec:overflow"); + } + /* + powfm = c2 / c1^z = f^m + */ + void getPowerf(Ec& powfm, const CipherText& c) const + { + Ec c1z; + Ec::mul(c1z, c.c1, z); + Ec::sub(powfm, c.c2, c1z); + } + /* + set range of message to decode quickly + */ + void setCache(int rangeMin, int rangeMax) + { + cache.init(pub.getF(), rangeMin, rangeMax); + } + /* + clear cache + */ + void clearCache() + { + cache.clear(); + } + /* + decode message by lookup table if !cache.isEmpty() + brute-force attack otherwise + input : c = (c1, c2) + b : set false if not found + return m + */ + int dec(const CipherText& c, bool *b = 0) const + { + Ec powfm; + getPowerf(powfm, c); + return cache.getExponent(powfm, b); + } + /* + check whether c is encrypted zero message + */ + bool isZeroMessage(const CipherText& c) const + { + Ec c1z; + Ec::mul(c1z, c.c1, z); + return c.c2 == c1z; + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + pub.load(is, ioMode); + z.load(is, ioMode); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + pub.save(os, ioMode); + if (sep) cybozu::writeChar(os, sep); + z.save(os, ioMode); + } + void getStr(std::string& str, int ioMode = 0) const + { + str.clear(); + cybozu::StringOutputStream os(str); + save(os, ioMode); + } + std::string getStr(int ioMode = 0) const + { + std::string str; + getStr(str, ioMode); + return str; + } + void setStr(const std::string& str, int ioMode = 0) + { + cybozu::StringInputStream is(str); + load(is, ioMode); + } + friend inline std::ostream& operator<<(std::ostream& os, const PrivateKey& self) + { + self.save(os, fp::detectIoMode(Ec::getIoMode(), os)); + return os; + } + friend inline std::istream& operator>>(std::istream& is, PrivateKey& self) + { + self.load(is, fp::detectIoMode(Ec::getIoMode(), is)); + return is; + } + std::string toStr() const { return getStr(); } + void fromStr(const std::string& str) { setStr(str); } + }; +}; + +} // mcl diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/fp.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/fp.hpp new file mode 100644 index 000000000..48500127e --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/fp.hpp @@ -0,0 +1,635 @@ +#pragma once +/** + @file + @brief finite field class + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#ifndef CYBOZU_DONT_USE_STRING +#include <iosfwd> +#endif +#ifdef _MSC_VER + #pragma warning(push) + #pragma 
warning(disable : 4127) + #pragma warning(disable : 4458) + #ifndef NOMINMAX + #define NOMINMAX + #endif + #ifndef MCL_NO_AUTOLINK + #ifdef NDEBUG + #pragma comment(lib, "mcl.lib") + #else + #pragma comment(lib, "mcl.lib") + #endif + #endif +#endif +#include <cybozu/hash.hpp> +#include <cybozu/stream.hpp> +#include <mcl/op.hpp> +#include <mcl/util.hpp> +#include <mcl/operator.hpp> +#include <mcl/conversion.hpp> + +namespace mcl { + +struct FpTag; +struct ZnTag; + +namespace fp { + +// copy src to dst as little endian +void copyUnitToByteAsLE(uint8_t *dst, const Unit *src, size_t byteSize); +// copy src to dst as little endian +void copyByteToUnitAsLE(Unit *dst, const uint8_t *src, size_t byteSize); + +bool copyAndMask(Unit *y, const void *x, size_t xByteSize, const Op& op, MaskMode maskMode); + +uint64_t getUint64(bool *pb, const fp::Block& b); +int64_t getInt64(bool *pb, fp::Block& b, const fp::Op& op); + +const char *ModeToStr(Mode mode); + +Mode StrToMode(const char *s); + +#ifndef CYBOZU_DONT_USE_STRING +inline Mode StrToMode(const std::string& s) +{ + return StrToMode(s.c_str()); +} +#endif + +inline void dumpUnit(Unit x) +{ +#if MCL_SIZEOF_UNIT == 4 + printf("%08x", (uint32_t)x); +#else + printf("%016llx", (unsigned long long)x); +#endif +} + +bool isEnableJIT(); // 1st call is not threadsafe + +void getRandVal(Unit *out, RandGen& rg, const Unit *in, size_t bitSize); + +uint32_t sha256(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize); +uint32_t sha512(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize); + +} // mcl::fp + +template<class tag = FpTag, size_t maxBitSize = MCL_MAX_BIT_SIZE> +class FpT : public fp::Serializable<FpT<tag, maxBitSize>, + fp::Operator<FpT<tag, maxBitSize> > > { + typedef fp::Unit Unit; + typedef fp::Operator<FpT<tag, maxBitSize> > Operator; + typedef fp::Serializable<FpT<tag, maxBitSize>, Operator> Serializer; +public: + static const size_t maxSize = (maxBitSize + fp::UnitBitSize - 1) / fp::UnitBitSize; +private: + template<class tag2, size_t maxBitSize2> friend class FpT; + Unit v_[maxSize]; + static fp::Op op_; + static FpT<tag, maxBitSize> inv2_; + static int ioMode_; + template<class Fp> friend class FpDblT; + template<class Fp> friend class Fp2T; + template<class Fp> friend struct Fp6T; +public: + typedef FpT<tag, maxBitSize> BaseFp; + // return pointer to array v_[] + const Unit *getUnit() const { return v_; } + FpT* getFp0() { return this; } + const FpT* getFp0() const { return this; } + static inline size_t getUnitSize() { return op_.N; } + static inline size_t getBitSize() { return op_.bitSize; } + static inline size_t getByteSize() { return (op_.bitSize + 7) / 8; } + static inline const fp::Op& getOp() { return op_; } + void dump() const + { + const size_t N = op_.N; + for (size_t i = 0; i < N; i++) { + fp::dumpUnit(v_[N - 1 - i]); + } + printf("\n"); + } + static inline void init(bool *pb, const mpz_class& _p, fp::Mode mode = fp::FP_AUTO) + { + assert(maxBitSize <= MCL_MAX_BIT_SIZE); + *pb = op_.init(_p, maxBitSize, mode); + if (!*pb) return; + { // set oneRep + FpT& one = *reinterpret_cast<FpT*>(op_.oneRep); + one.clear(); + one.v_[0] = 1; + one.toMont(); + } + { // set half + mpz_class half = (op_.mp + 1) / 2; + gmp::getArray(pb, op_.half, op_.N, half); + if (!*pb) return; + } + inv(inv2_, 2); +#ifdef MCL_XBYAK_DIRECT_CALL + add = (void (*)(FpT& z, const FpT& x, const FpT& y))op_.fp_addA_; + if (add == 0) add = addC; + sub = (void (*)(FpT& z, const FpT& x, const FpT& y))op_.fp_subA_; + if (sub == 0) sub = subC; + 
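+		// each op below follows the same pattern: use op_'s specialized (e.g. JIT-generated) routine when present, otherwise fall back to the portable *C wrapper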
neg = (void (*)(FpT& y, const FpT& x))op_.fp_negA_; + if (neg == 0) neg = negC; + mul = (void (*)(FpT& z, const FpT& x, const FpT& y))op_.fp_mulA_; + if (mul == 0) mul = mulC; + sqr = (void (*)(FpT& y, const FpT& x))op_.fp_sqrA_; + if (sqr == 0) sqr = sqrC; +#endif + *pb = true; + } + static inline void init(bool *pb, const char *mstr, fp::Mode mode = fp::FP_AUTO) + { + mpz_class p; + gmp::setStr(pb, p, mstr); + if (!*pb) return; + init(pb, p, mode); + } + static inline size_t getModulo(char *buf, size_t bufSize) + { + return gmp::getStr(buf, bufSize, op_.mp); + } + static inline bool isFullBit() { return op_.isFullBit; } + /* + binary patter of p + @note the value of p is zero + */ + static inline const FpT& getP() + { + return *reinterpret_cast<const FpT*>(op_.p); + } + bool isOdd() const + { + fp::Block b; + getBlock(b); + return (b.p[0] & 1) == 1; + } + static inline bool squareRoot(FpT& y, const FpT& x) + { + if (isMont()) return op_.sq.get(y, x); + mpz_class mx, my; + bool b = false; + x.getMpz(&b, mx); + if (!b) return false; + b = op_.sq.get(my, mx); + if (!b) return false; + y.setMpz(&b, my); + return b; + } + FpT() {} + FpT(const FpT& x) + { + op_.fp_copy(v_, x.v_); + } + FpT& operator=(const FpT& x) + { + op_.fp_copy(v_, x.v_); + return *this; + } + void clear() + { + op_.fp_clear(v_); + } + FpT(int64_t x) { operator=(x); } + FpT& operator=(int64_t x) + { + if (x == 1) { + op_.fp_copy(v_, op_.oneRep); + } else { + clear(); + if (x) { + int64_t y = x < 0 ? -x : x; + if (sizeof(Unit) == 8) { + v_[0] = y; + } else { + v_[0] = (uint32_t)y; + v_[1] = (uint32_t)(y >> 32); + } + if (x < 0) neg(*this, *this); + toMont(); + } + } + return *this; + } + static inline bool isMont() { return op_.isMont; } + /* + convert normal value to Montgomery value + do nothing is !isMont() + */ + void toMont() + { + if (isMont()) op_.toMont(v_, v_); + } + /* + convert Montgomery value to normal value + do nothing is !isMont() + */ + void fromMont() + { + if (isMont()) op_.fromMont(v_, v_); + } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode) + { + bool isMinus = false; + *pb = false; + if (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) { + const size_t n = getByteSize(); + v_[op_.N - 1] = 0; + size_t readSize; + if (ioMode & IoSerializeHexStr) { + readSize = mcl::fp::readHexStr(v_, n, is); + } else { + readSize = cybozu::readSome(v_, n, is); + } + if (readSize != n) return; + } else { + char buf[1024]; + size_t n = fp::local::loadWord(buf, sizeof(buf), is); + if (n == 0) return; + n = fp::strToArray(&isMinus, v_, op_.N, buf, n, ioMode); + if (n == 0) return; + for (size_t i = n; i < op_.N; i++) v_[i] = 0; + } + if (fp::isGreaterOrEqualArray(v_, op_.p, op_.N)) { + return; + } + if (isMinus) { + neg(*this, *this); + } + if (!(ioMode & IoArrayRaw)) { + toMont(); + } + *pb = true; + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode) const + { + const size_t n = getByteSize(); + if (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) { + if (ioMode & IoArrayRaw) { + cybozu::write(pb, os, v_, n); + } else { + fp::Block b; + getBlock(b); + if (ioMode & IoSerializeHexStr) { + mcl::fp::writeHexStr(pb, os, b.p, n); + } else { + cybozu::write(pb, os, b.p, n); + } + } + return; + } + fp::Block b; + getBlock(b); + // use low 8-bit ioMode for (base, withPrefix) + char buf[2048]; + size_t len = mcl::fp::arrayToStr(buf, sizeof(buf), b.p, b.n, ioMode & 31, (ioMode & IoPrefix) != 0); + if (len == 0) { + *pb = false; + 
return; + } + cybozu::write(pb, os, buf + sizeof(buf) - len, len); + } + template<class S> + void setArray(bool *pb, const S *x, size_t n) + { + *pb = fp::copyAndMask(v_, x, sizeof(S) * n, op_, fp::NoMask); + toMont(); + } + /* + mask x with (1 << bitLen) and subtract p if x >= p + */ + template<class S> + void setArrayMaskMod(const S *x, size_t n) + { + fp::copyAndMask(v_, x, sizeof(S) * n, op_, fp::MaskAndMod); + toMont(); + } + + /* + mask x with (1 << (bitLen - 1)) - 1 if x >= p + */ + template<class S> + void setArrayMask(const S *x, size_t n) + { + fp::copyAndMask(v_, x, sizeof(S) * n, op_, fp::SmallMask); + toMont(); + } + void getBlock(fp::Block& b) const + { + b.n = op_.N; + if (isMont()) { + op_.fromMont(b.v_, v_); + b.p = &b.v_[0]; + } else { + b.p = &v_[0]; + } + } + void setByCSPRNG(fp::RandGen rg = fp::RandGen()) + { + if (rg.isZero()) rg = fp::RandGen::get(); + fp::getRandVal(v_, rg, op_.p, op_.bitSize); + toMont(); + } + void setRand(fp::RandGen rg = fp::RandGen()) // old api + { + setByCSPRNG(rg); + } + /* + hash msg and mask with (1 << (bitLen - 1)) - 1 + */ + void setHashOf(const void *msg, size_t msgSize) + { + char buf[MCL_MAX_HASH_BIT_SIZE / 8]; + uint32_t size = op_.hash(buf, static_cast<uint32_t>(sizeof(buf)), msg, static_cast<uint32_t>(msgSize)); + setArrayMask(buf, size); + } + void getMpz(bool *pb, mpz_class& x) const + { + fp::Block b; + getBlock(b); + gmp::setArray(pb, x, b.p, b.n); + } + void setMpz(bool *pb, const mpz_class& x) + { + if (x < 0) { + *pb = false; + return; + } + setArray(pb, gmp::getUnit(x), gmp::getUnitSize(x)); + } +#ifdef MCL_XBYAK_DIRECT_CALL + static void (*add)(FpT& z, const FpT& x, const FpT& y); + static inline void addC(FpT& z, const FpT& x, const FpT& y) { op_.fp_add(z.v_, x.v_, y.v_, op_.p); } + static void (*sub)(FpT& z, const FpT& x, const FpT& y); + static inline void subC(FpT& z, const FpT& x, const FpT& y) { op_.fp_sub(z.v_, x.v_, y.v_, op_.p); } + static void (*neg)(FpT& y, const FpT& x); + static inline void negC(FpT& y, const FpT& x) { op_.fp_neg(y.v_, x.v_, op_.p); } + static void (*mul)(FpT& z, const FpT& x, const FpT& y); + static inline void mulC(FpT& z, const FpT& x, const FpT& y) { op_.fp_mul(z.v_, x.v_, y.v_, op_.p); } + static void (*sqr)(FpT& y, const FpT& x); + static inline void sqrC(FpT& y, const FpT& x) { op_.fp_sqr(y.v_, x.v_, op_.p); } +#else + static inline void add(FpT& z, const FpT& x, const FpT& y) { op_.fp_add(z.v_, x.v_, y.v_, op_.p); } + static inline void sub(FpT& z, const FpT& x, const FpT& y) { op_.fp_sub(z.v_, x.v_, y.v_, op_.p); } + static inline void neg(FpT& y, const FpT& x) { op_.fp_neg(y.v_, x.v_, op_.p); } + static inline void mul(FpT& z, const FpT& x, const FpT& y) { op_.fp_mul(z.v_, x.v_, y.v_, op_.p); } + static inline void sqr(FpT& y, const FpT& x) { op_.fp_sqr(y.v_, x.v_, op_.p); } +#endif + static inline void addPre(FpT& z, const FpT& x, const FpT& y) { op_.fp_addPre(z.v_, x.v_, y.v_); } + static inline void subPre(FpT& z, const FpT& x, const FpT& y) { op_.fp_subPre(z.v_, x.v_, y.v_); } + static inline void mulUnit(FpT& z, const FpT& x, const Unit y) + { + if (mulSmallUnit(z, x, y)) return; + op_.fp_mulUnit(z.v_, x.v_, y, op_.p); + } + static inline void inv(FpT& y, const FpT& x) { op_.fp_invOp(y.v_, x.v_, op_); } + static inline void divBy2(FpT& y, const FpT& x) + { +#if 0 + mul(y, x, inv2_); +#else + bool odd = (x.v_[0] & 1) != 0; + op_.fp_shr1(y.v_, x.v_); + if (odd) { + op_.fp_addPre(y.v_, y.v_, op_.half); + } +#endif + } + static inline void divBy4(FpT& y, const FpT& x) + { + 
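+		// y = x/4 mod p, computed as two successive halvings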
divBy2(y, x); // QQQ : optimize later + divBy2(y, y); + } + bool isZero() const { return op_.fp_isZero(v_); } + bool isOne() const { return fp::isEqualArray(v_, op_.oneRep, op_.N); } + static const inline FpT& one() { return *reinterpret_cast<const FpT*>(op_.oneRep); } + /* + half = (p + 1) / 2 + return true if half <= x < p + return false if 0 <= x < half + */ + bool isNegative() const + { + fp::Block b; + getBlock(b); + return fp::isGreaterOrEqualArray(b.p, op_.half, op_.N); + } + bool isValid() const + { + return fp::isLessArray(v_, op_.p, op_.N); + } + uint64_t getUint64(bool *pb) const + { + fp::Block b; + getBlock(b); + return fp::getUint64(pb, b); + } + int64_t getInt64(bool *pb) const + { + fp::Block b; + getBlock(b); + return fp::getInt64(pb, b, op_); + } + bool operator==(const FpT& rhs) const { return fp::isEqualArray(v_, rhs.v_, op_.N); } + bool operator!=(const FpT& rhs) const { return !operator==(rhs); } + /* + @note + this compare functions is slow because of calling mul if isMont is true. + */ + static inline int compare(const FpT& x, const FpT& y) + { + fp::Block xb, yb; + x.getBlock(xb); + y.getBlock(yb); + return fp::compareArray(xb.p, yb.p, op_.N); + } + bool isLess(const FpT& rhs) const + { + fp::Block xb, yb; + getBlock(xb); + rhs.getBlock(yb); + return fp::isLessArray(xb.p, yb.p, op_.N); + } + bool operator<(const FpT& rhs) const { return isLess(rhs); } + bool operator>=(const FpT& rhs) const { return !operator<(rhs); } + bool operator>(const FpT& rhs) const { return rhs < *this; } + bool operator<=(const FpT& rhs) const { return !operator>(rhs); } + /* + @note + return unexpected order if isMont is set. + */ + static inline int compareRaw(const FpT& x, const FpT& y) + { + return fp::compareArray(x.v_, y.v_, op_.N); + } + bool isLessRaw(const FpT& rhs) const + { + return fp::isLessArray(v_, rhs.v_, op_.N); + } + /* + set IoMode for operator<<(), or operator>>() + */ + static inline void setIoMode(int ioMode) + { + ioMode_ = ioMode; + } + static inline int getIoMode() { return ioMode_; } + static inline size_t getModBitLen() { return getBitSize(); } + static inline void setHashFunc(uint32_t hash(void *out, uint32_t maxOutSize, const void *msg, uint32_t msgSize)) + { + op_.hash = hash; + } +#ifndef CYBOZU_DONT_USE_STRING + explicit FpT(const std::string& str, int base = 0) + { + Serializer::setStr(str, base); + } + static inline void getModulo(std::string& pstr) + { + gmp::getStr(pstr, op_.mp); + } + static std::string getModulo() + { + std::string s; + getModulo(s); + return s; + } + void setHashOf(const std::string& msg) + { + setHashOf(msg.data(), msg.size()); + } + // backward compatibility + static inline void setModulo(const std::string& mstr, fp::Mode mode = fp::FP_AUTO) + { + init(mstr, mode); + } + friend inline std::ostream& operator<<(std::ostream& os, const FpT& self) + { + self.save(os, fp::detectIoMode(getIoMode(), os)); + return os; + } + friend inline std::istream& operator>>(std::istream& is, FpT& self) + { + self.load(is, fp::detectIoMode(getIoMode(), is)); + return is; + } +#endif +#ifndef CYBOZU_DONT_USE_EXCEPTION + static inline void init(const mpz_class& _p, fp::Mode mode = fp::FP_AUTO) + { + bool b; + init(&b, _p, mode); + if (!b) throw cybozu::Exception("Fp:init"); + } + static inline void init(const std::string& mstr, fp::Mode mode = fp::FP_AUTO) + { + bool b; + init(&b, mstr.c_str(), mode); + if (!b) throw cybozu::Exception("Fp:init"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool 
b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("fp:save") << ioMode; + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("fp:load") << ioMode; + } + /* + throw exception if x >= p + */ + template<class S> + void setArray(const S *x, size_t n) + { + bool b; + setArray(&b, x, n); + if (!b) throw cybozu::Exception("Fp:setArray"); + } + void setMpz(const mpz_class& x) + { + bool b; + setMpz(&b, x); + if (!b) throw cybozu::Exception("Fp:setMpz:neg"); + } + uint64_t getUint64() const + { + bool b; + uint64_t v = getUint64(&b); + if (!b) throw cybozu::Exception("Fp:getUint64:large value"); + return v; + } + int64_t getInt64() const + { + bool b; + int64_t v = getInt64(&b); + if (!b) throw cybozu::Exception("Fp:getInt64:large value"); + return v; + } + void getMpz(mpz_class& x) const + { + bool b; + getMpz(&b, x); + if (!b) throw cybozu::Exception("Fp:getMpz"); + } + mpz_class getMpz() const + { + mpz_class x; + getMpz(x); + return x; + } +#endif +}; + +template<class tag, size_t maxBitSize> fp::Op FpT<tag, maxBitSize>::op_; +template<class tag, size_t maxBitSize> FpT<tag, maxBitSize> FpT<tag, maxBitSize>::inv2_; +template<class tag, size_t maxBitSize> int FpT<tag, maxBitSize>::ioMode_ = IoAuto; +#ifdef MCL_XBYAK_DIRECT_CALL +template<class tag, size_t maxBitSize> void (*FpT<tag, maxBitSize>::add)(FpT& z, const FpT& x, const FpT& y); +template<class tag, size_t maxBitSize> void (*FpT<tag, maxBitSize>::sub)(FpT& z, const FpT& x, const FpT& y); +template<class tag, size_t maxBitSize> void (*FpT<tag, maxBitSize>::neg)(FpT& y, const FpT& x); +template<class tag, size_t maxBitSize> void (*FpT<tag, maxBitSize>::mul)(FpT& z, const FpT& x, const FpT& y); +template<class tag, size_t maxBitSize> void (*FpT<tag, maxBitSize>::sqr)(FpT& y, const FpT& x); +#endif + +} // mcl + +#ifdef CYBOZU_USE_BOOST +namespace mcl { + +template<class tag, size_t maxBitSize> +size_t hash_value(const mcl::FpT<tag, maxBitSize>& x, size_t v = 0) +{ + return static_cast<size_t>(cybozu::hash64(x.getUnit(), x.getUnitSize(), v)); +} + +} +#else +namespace std { CYBOZU_NAMESPACE_TR1_BEGIN + +template<class tag, size_t maxBitSize> +struct hash<mcl::FpT<tag, maxBitSize> > { + size_t operator()(const mcl::FpT<tag, maxBitSize>& x, uint64_t v = 0) const + { + return static_cast<size_t>(cybozu::hash64(x.getUnit(), x.getUnitSize(), v)); + } +}; + +CYBOZU_NAMESPACE_TR1_END } // std::tr1 +#endif + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/fp_tower.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/fp_tower.hpp new file mode 100644 index 000000000..63738a3f5 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/fp_tower.hpp @@ -0,0 +1,1342 @@ +#pragma once +/** + @file + @brief finite field extension class + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <mcl/fp.hpp> + +namespace mcl { + +template<class Fp> +class FpDblT : public fp::Serializable<FpDblT<Fp> > { + typedef fp::Unit Unit; + Unit v_[Fp::maxSize * 2]; +public: + static size_t getUnitSize() { return Fp::op_.N * 2; } + FpDblT() : v_() + { + } + FpDblT(const FpDblT& rhs) + { + const size_t n = getUnitSize(); + for (size_t i = 0; i < n; i++) { + v_[i] = rhs.v_[i]; + } + } + void dump() const + { + const size_t n = getUnitSize(); + for (size_t i = 0; i < n; i++) { + mcl::fp::dumpUnit(v_[n - 1 - 
i]); + } + printf("\n"); + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int) const + { + char buf[1024]; + size_t n = mcl::fp::arrayToHex(buf, sizeof(buf), v_, getUnitSize()); + if (n == 0) { + *pb = false; + return; + } + cybozu::write(pb, os, buf + sizeof(buf) - n, sizeof(buf)); + } + template<class InputStream> + void load(bool *pb, InputStream& is, int) + { + char buf[1024]; + *pb = false; + size_t n = fp::local::loadWord(buf, sizeof(buf), is); + if (n == 0) return; + n = fp::hexToArray(v_, getUnitSize(), buf, n); + if (n == 0) return; + for (size_t i = n; i < getUnitSize(); i++) v_[i] = 0; + *pb = true; + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("FpDblT:save") << ioMode; + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("FpDblT:load") << ioMode; + } + void getMpz(mpz_class& x) const + { + bool b; + getMpz(&b, x); + if (!b) throw cybozu::Exception("FpDblT:getMpz"); + } + mpz_class getMpz() const + { + mpz_class x; + getMpz(x); + return x; + } +#endif + void clear() + { + const size_t n = getUnitSize(); + for (size_t i = 0; i < n; i++) { + v_[i] = 0; + } + } + FpDblT& operator=(const FpDblT& rhs) + { + const size_t n = getUnitSize(); + for (size_t i = 0; i < n; i++) { + v_[i] = rhs.v_[i]; + } + return *this; + } + // QQQ : does not check range of x strictly(use for debug) + void setMpz(const mpz_class& x) + { + assert(x >= 0); + const size_t xn = gmp::getUnitSize(x); + const size_t N2 = getUnitSize(); + if (xn > N2) { + assert(0); + return; + } + memcpy(v_, gmp::getUnit(x), xn * sizeof(Unit)); + memset(v_ + xn, 0, (N2 - xn) * sizeof(Unit)); + } + void getMpz(bool *pb, mpz_class& x) const + { + gmp::setArray(pb, x, v_, Fp::op_.N * 2); + } +#ifdef MCL_XBYAK_DIRECT_CALL + static void (*add)(FpDblT& z, const FpDblT& x, const FpDblT& y); + static void (*sub)(FpDblT& z, const FpDblT& x, const FpDblT& y); + static void (*mod)(Fp& z, const FpDblT& xy); + static void addC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_add(z.v_, x.v_, y.v_, Fp::op_.p); } + static void subC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_sub(z.v_, x.v_, y.v_, Fp::op_.p); } + static void modC(Fp& z, const FpDblT& xy) { Fp::op_.fpDbl_mod(z.v_, xy.v_, Fp::op_.p); } +#else + static void add(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_add(z.v_, x.v_, y.v_, Fp::op_.p); } + static void sub(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_sub(z.v_, x.v_, y.v_, Fp::op_.p); } + static void mod(Fp& z, const FpDblT& xy) { Fp::op_.fpDbl_mod(z.v_, xy.v_, Fp::op_.p); } +#endif + static void addPreC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_addPre(z.v_, x.v_, y.v_); } + static void subPreC(FpDblT& z, const FpDblT& x, const FpDblT& y) { Fp::op_.fpDbl_subPre(z.v_, x.v_, y.v_); } + static void mulPreC(FpDblT& xy, const Fp& x, const Fp& y) { Fp::op_.fpDbl_mulPre(xy.v_, x.v_, y.v_); } + static void sqrPreC(FpDblT& xx, const Fp& x) { Fp::op_.fpDbl_sqrPre(xx.v_, x.v_); } + static void (*addPre)(FpDblT& z, const FpDblT& x, const FpDblT& y); + static void (*subPre)(FpDblT& z, const FpDblT& x, const FpDblT& y); + /* + mul(z, x, y) = mulPre(xy, x, y) + mod(z, xy) + */ + static void (*mulPre)(FpDblT& xy, const Fp& x, const Fp& y); + static void (*sqrPre)(FpDblT& xx, const 
Fp& x); + static void mulUnit(FpDblT& z, const FpDblT& x, Unit y) + { + if (mulSmallUnit(z, x, y)) return; + assert(0); // not supported y + } + static void init() + { + const mcl::fp::Op& op = Fp::getOp(); +#ifdef MCL_XBYAK_DIRECT_CALL + add = (void (*)(FpDblT&, const FpDblT&, const FpDblT&))op.fpDbl_addA_; + if (add == 0) add = addC; + sub = (void (*)(FpDblT&, const FpDblT&, const FpDblT&))op.fpDbl_subA_; + if (sub == 0) sub = subC; + mod = (void (*)(Fp&, const FpDblT&))op.fpDbl_modA_; + if (mod == 0) mod = modC; +#endif + if (op.fpDbl_addPreA_) { + addPre = (void (*)(FpDblT&, const FpDblT&, const FpDblT&))op.fpDbl_addPreA_; + } else { + addPre = addPreC; + } + if (op.fpDbl_subPreA_) { + subPre = (void (*)(FpDblT&, const FpDblT&, const FpDblT&))op.fpDbl_subPreA_; + } else { + subPre = subPreC; + } + if (op.fpDbl_mulPreA_) { + mulPre = (void (*)(FpDblT&, const Fp&, const Fp&))op.fpDbl_mulPreA_; + } else { + mulPre = mulPreC; + } + if (op.fpDbl_sqrPreA_) { + sqrPre = (void (*)(FpDblT&, const Fp&))op.fpDbl_sqrPreA_; + } else { + sqrPre = sqrPreC; + } + } + void operator+=(const FpDblT& x) { add(*this, *this, x); } + void operator-=(const FpDblT& x) { sub(*this, *this, x); } +}; + +#ifdef MCL_XBYAK_DIRECT_CALL +template<class Fp> void (*FpDblT<Fp>::add)(FpDblT&, const FpDblT&, const FpDblT&); +template<class Fp> void (*FpDblT<Fp>::sub)(FpDblT&, const FpDblT&, const FpDblT&); +template<class Fp> void (*FpDblT<Fp>::mod)(Fp&, const FpDblT&); +#endif +template<class Fp> void (*FpDblT<Fp>::addPre)(FpDblT&, const FpDblT&, const FpDblT&); +template<class Fp> void (*FpDblT<Fp>::subPre)(FpDblT&, const FpDblT&, const FpDblT&); +template<class Fp> void (*FpDblT<Fp>::mulPre)(FpDblT&, const Fp&, const Fp&); +template<class Fp> void (*FpDblT<Fp>::sqrPre)(FpDblT&, const Fp&); + +template<class Fp> struct Fp12T; +template<class Fp> class BNT; +template<class Fp> struct Fp2DblT; +/* + beta = -1 + Fp2 = F[i] / (i^2 + 1) + x = a + bi +*/ +template<class _Fp> +class Fp2T : public fp::Serializable<Fp2T<_Fp>, + fp::Operator<Fp2T<_Fp> > > { + typedef _Fp Fp; + typedef fp::Unit Unit; + typedef FpDblT<Fp> FpDbl; + typedef Fp2DblT<Fp> Fp2Dbl; + static uint32_t xi_a_; + static const size_t gN = 5; + /* + g = xi^((p - 1) / 6) + g[] = { g^2, g^4, g^1, g^3, g^5 } + */ + static Fp2T g[gN]; + static Fp2T g2[gN]; + static Fp2T g3[gN]; +public: + static const Fp2T *get_gTbl() { return &g[0]; } + static const Fp2T *get_g2Tbl() { return &g2[0]; } + static const Fp2T *get_g3Tbl() { return &g3[0]; } + typedef typename Fp::BaseFp BaseFp; + static const size_t maxSize = Fp::maxSize * 2; + static inline size_t getByteSize() { return Fp::getByteSize() * 2; } + void dump() const + { + a.dump(); + b.dump(); + } + Fp a, b; + Fp2T() { } + Fp2T(int64_t a) : a(a), b(0) { } + Fp2T(const Fp& a, const Fp& b) : a(a), b(b) { } + Fp2T(int64_t a, int64_t b) : a(a), b(b) { } + Fp* getFp0() { return &a; } + const Fp* getFp0() const { return &a; } + const Unit* getUnit() const { return a.getUnit(); } + void clear() + { + a.clear(); + b.clear(); + } + void set(const Fp &a_, const Fp &b_) + { + a = a_; + b = b_; + } + static void (*add)(Fp2T& z, const Fp2T& x, const Fp2T& y); + static void (*sub)(Fp2T& z, const Fp2T& x, const Fp2T& y); + static void (*neg)(Fp2T& y, const Fp2T& x); + static void (*mul)(Fp2T& z, const Fp2T& x, const Fp2T& y); + static void (*sqr)(Fp2T& y, const Fp2T& x); + static void (*mul_xi)(Fp2T& y, const Fp2T& x); + static void addPre(Fp2T& z, const Fp2T& x, const Fp2T& y) { Fp::addPre(z.a, x.a, y.a); Fp::addPre(z.b, x.b, y.b); } 
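// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored mcl sources. FpDblT::init() and
// Fp2T::init() above pick a backend at runtime: when the fp::Op table exposes
// an optimized routine (fpDbl_addPreA_, fp2_mulA_, ...) the class-level
// function pointer is bound to it, otherwise it falls back to the portable C
// wrapper (addPreC, fp2_mulC, ...). The stand-alone program below illustrates
// only that dispatch pattern; the names Backend, addPortable, addFast and
// initDispatch are hypothetical and do not exist in mcl.
#if 0
#include <stdint.h>
#include <stdio.h>

namespace sketch {

typedef void (*AddFunc)(uint64_t& z, uint64_t x, uint64_t y);

void addPortable(uint64_t& z, uint64_t x, uint64_t y) { z = x + y; } // plays the role of addPreC
void addFast(uint64_t& z, uint64_t x, uint64_t y) { z = x + y; }     // stands in for an asm/JIT routine

struct Backend {                 // plays the role of fp::Op's *A_ slots
	AddFunc addA_;
	Backend() : addA_(0) {}
};
AddFunc add = 0;                 // plays the role of the class-level function pointer

void initDispatch(const Backend& op)
{
	add = op.addA_ ? op.addA_ : addPortable; // same shape as FpDblT::init()
}

} // sketch

int main()
{
	sketch::Backend op;              // addA_ == 0 -> portable fallback is chosen
	sketch::initDispatch(op);
	uint64_t z;
	sketch::add(z, 1, 2);
	printf("portable: %llu\n", (unsigned long long)z);

	op.addA_ = sketch::addFast;      // pretend an optimized routine was generated
	sketch::initDispatch(op);
	sketch::add(z, 3, 4);
	printf("fast: %llu\n", (unsigned long long)z);
	return 0;
}
#endif
// The indirection keeps a single calling convention for callers while letting
// optimized (e.g. Xbyak-generated) routines replace the portable ones when available.
// ---------------------------------------------------------------------------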
+ static void inv(Fp2T& y, const Fp2T& x) { Fp::op_.fp2_inv(y.a.v_, x.a.v_); } + static void divBy2(Fp2T& y, const Fp2T& x) + { + Fp::divBy2(y.a, x.a); + Fp::divBy2(y.b, x.b); + } + static void divBy4(Fp2T& y, const Fp2T& x) + { + Fp::divBy4(y.a, x.a); + Fp::divBy4(y.b, x.b); + } + static void mulFp(Fp2T& z, const Fp2T& x, const Fp& y) + { + Fp::mul(z.a, x.a, y); + Fp::mul(z.b, x.b, y); + } + template<class S> + void setArray(bool *pb, const S *buf, size_t n) + { + assert((n & 1) == 0); + n /= 2; + a.setArray(pb, buf, n); + if (!*pb) return; + b.setArray(pb, buf + n, n); + } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode) + { + a.load(pb, is, ioMode); + if (!*pb) return; + b.load(pb, is, ioMode); + } + /* + Fp2T = <a> + ' ' + <b> + */ + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode) const + { + const char sep = *fp::getIoSeparator(ioMode); + a.save(pb, os, ioMode); + if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + b.save(pb, os, ioMode); + } + bool isZero() const { return a.isZero() && b.isZero(); } + bool isOne() const { return a.isOne() && b.isZero(); } + bool operator==(const Fp2T& rhs) const { return a == rhs.a && b == rhs.b; } + bool operator!=(const Fp2T& rhs) const { return !operator==(rhs); } + /* + return true is a is odd (do not consider b) + this function is for only compressed reprezentation of EC + isOdd() is not good naming. QQQ + */ + bool isOdd() const { return a.isOdd(); } + /* + (a + bi)^2 = (a^2 - b^2) + 2ab i = c + di + A = a^2 + B = b^2 + A = (c +/- sqrt(c^2 + d^2))/2 + b = d / 2a + */ + static inline bool squareRoot(Fp2T& y, const Fp2T& x) + { + Fp t1, t2; + if (x.b.isZero()) { + if (Fp::squareRoot(t1, x.a)) { + y.a = t1; + y.b.clear(); + } else { + bool b = Fp::squareRoot(t1, -x.a); + assert(b); (void)b; + y.a.clear(); + y.b = t1; + } + return true; + } + Fp::sqr(t1, x.a); + Fp::sqr(t2, x.b); + t1 += t2; // c^2 + d^2 + if (!Fp::squareRoot(t1, t1)) return false; + Fp::add(t2, x.a, t1); + Fp::divBy2(t2, t2); + if (!Fp::squareRoot(t2, t2)) { + Fp::sub(t2, x.a, t1); + Fp::divBy2(t2, t2); + bool b = Fp::squareRoot(t2, t2); + assert(b); (void)b; + } + y.a = t2; + t2 += t2; + Fp::inv(t2, t2); + Fp::mul(y.b, x.b, t2); + return true; + } + static void inline norm(Fp& y, const Fp2T& x) + { + Fp aa, bb; + Fp::sqr(aa, x.a); + Fp::sqr(bb, x.b); + Fp::add(y, aa, bb); + } + /* + Frobenius + i^2 = -1 + (a + bi)^p = a + bi^p in Fp + = a + bi if p = 1 mod 4 + = a - bi if p = 3 mod 4 + */ + static void Frobenius(Fp2T& y, const Fp2T& x) + { + if (Fp::getOp().pmod4 == 1) { + if (&y != &x) { + y = x; + } + } else { + if (&y != &x) { + y.a = x.a; + } + Fp::neg(y.b, x.b); + } + } + + static uint32_t get_xi_a() { return xi_a_; } + static void init(uint32_t xi_a) + { +// assert(Fp::maxSize <= 256); + xi_a_ = xi_a; + mcl::fp::Op& op = Fp::op_; + add = (void (*)(Fp2T& z, const Fp2T& x, const Fp2T& y))op.fp2_addA_; + if (add == 0) add = fp2_addC; + sub = (void (*)(Fp2T& z, const Fp2T& x, const Fp2T& y))op.fp2_subA_; + if (sub == 0) sub = fp2_subC; + neg = (void (*)(Fp2T& y, const Fp2T& x))op.fp2_negA_; + if (neg == 0) neg = fp2_negC; + mul = (void (*)(Fp2T& z, const Fp2T& x, const Fp2T& y))op.fp2_mulA_; + if (mul == 0) { + if (op.isFastMod) { + mul = fp2_mulC; + } else if (!op.isFullBit) { + if (0 && sizeof(Fp) * 8 == op.N * fp::UnitBitSize && op.fp2_mulNF) { + mul = fp2_mulNFW; + } else { + mul = fp2_mulC; + } + } else { + mul = fp2_mulC; + } + } + sqr = (void (*)(Fp2T& y, const Fp2T& 
x))op.fp2_sqrA_; + if (sqr == 0) sqr = fp2_sqrC; + op.fp2_inv = fp2_invW; + if (xi_a == 1) { + /* + current fp_generator.hpp generates mul_xi for xi_a = 1 + */ + if (op.fp2_mul_xiA_) { + mul_xi = (void (*)(Fp2T&, const Fp2T&))op.fp2_mul_xiA_; + } else { + mul_xi = fp2_mul_xi_1_1i; + } + } else { + mul_xi = fp2_mul_xiC; + } + FpDblT<Fp>::init(); + Fp2DblT<Fp>::init(); + // call init before Fp2::pow because FpDbl is used in Fp2T + const Fp2T xi(xi_a, 1); + const mpz_class& p = Fp::getOp().mp; + Fp2T::pow(g[0], xi, (p - 1) / 6); // g = xi^((p-1)/6) + for (size_t i = 1; i < gN; i++) { + g[i] = g[i - 1] * g[0]; + } + /* + permutate [0, 1, 2, 3, 4] => [1, 3, 0, 2, 4] + g[0] = g^2 + g[1] = g^4 + g[2] = g^1 + g[3] = g^3 + g[4] = g^5 + */ + { + Fp2T t = g[0]; + g[0] = g[1]; + g[1] = g[3]; + g[3] = g[2]; + g[2] = t; + } + for (size_t i = 0; i < gN; i++) { + Fp2T t(g[i].a, g[i].b); + if (Fp::getOp().pmod4 == 3) Fp::neg(t.b, t.b); + Fp2T::mul(g2[i], t, g[i]); + g3[i] = g[i] * g2[i]; + } + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("Fp2T:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("Fp2T:save"); + } + template<class S> + void setArray(const S *buf, size_t n) + { + bool b; + setArray(&b, buf, n); + if (!b) throw cybozu::Exception("Fp2T:setArray"); + } +#endif +#ifndef CYBOZU_DONT_USE_STRING + Fp2T(const std::string& a, const std::string& b, int base = 0) : a(a, base), b(b, base) {} + friend std::istream& operator>>(std::istream& is, Fp2T& self) + { + self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const Fp2T& self) + { + self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os)); + return os; + } +#endif +private: + /* + default Fp2T operator + Fp2T = Fp[i]/(i^2 + 1) + */ + static void fp2_addC(Fp2T& z, const Fp2T& x, const Fp2T& y) + { + Fp::add(z.a, x.a, y.a); + Fp::add(z.b, x.b, y.b); + } + static void fp2_subC(Fp2T& z, const Fp2T& x, const Fp2T& y) + { + Fp::sub(z.a, x.a, y.a); + Fp::sub(z.b, x.b, y.b); + } + static void fp2_negC(Fp2T& y, const Fp2T& x) + { + Fp::neg(y.a, x.a); + Fp::neg(y.b, x.b); + } +#if 0 + /* + x = a + bi, y = c + di, i^2 = -1 + z = xy = (a + bi)(c + di) = (ac - bd) + (ad + bc)i + ad+bc = (a + b)(c + d) - ac - bd + # of mod = 3 + */ + static void fp2_mulW(Unit *z, const Unit *x, const Unit *y) + { + const Fp *px = reinterpret_cast<const Fp*>(x); + const Fp *py = reinterpret_cast<const Fp*>(y); + const Fp& a = px[0]; + const Fp& b = px[1]; + const Fp& c = py[0]; + const Fp& d = py[1]; + Fp *pz = reinterpret_cast<Fp*>(z); + Fp t1, t2, ac, bd; + Fp::add(t1, a, b); + Fp::add(t2, c, d); + t1 *= t2; // (a + b)(c + d) + Fp::mul(ac, a, c); + Fp::mul(bd, b, d); + Fp::sub(pz[0], ac, bd); // ac - bd + Fp::sub(pz[1], t1, ac); + pz[1] -= bd; + } +#endif + static void fp2_mulNFW(Fp2T& z, const Fp2T& x, const Fp2T& y) + { + const fp::Op& op = Fp::op_; + op.fp2_mulNF((Unit*)&z, (const Unit*)&x, (const Unit*)&y, op.p); + } + static void fp2_mulC(Fp2T& z, const Fp2T& x, const Fp2T& y) + { + Fp2Dbl d; + Fp2Dbl::mulPre(d, x, y); + FpDbl::mod(z.a, d.a); + FpDbl::mod(z.b, d.b); + } + /* + x = a + bi, i^2 = -1 + y = x^2 = (a + bi)^2 = (a + b)(a - b) + 2abi + */ + static void fp2_sqrC(Fp2T& y, const Fp2T& x) + { + const Fp& a = x.a; + const 
Fp& b = x.b; +#if 1 // faster than using FpDbl + Fp t1, t2, t3; + Fp::add(t1, b, b); // 2b + t1 *= a; // 2ab + Fp::add(t2, a, b); // a + b + Fp::sub(t3, a, b); // a - b + Fp::mul(y.a, t2, t3); // (a + b)(a - b) + y.b = t1; +#else + Fp t1, t2; + FpDbl d1, d2; + Fp::addPre(t1, b, b); // 2b + FpDbl::mulPre(d2, t1, a); // 2ab + Fp::addPre(t1, a, b); // a + b + Fp::sub(t2, a, b); // a - b + FpDbl::mulPre(d1, t1, t2); // (a + b)(a - b) + FpDbl::mod(py[0], d1); + FpDbl::mod(py[1], d2); +#endif + } + /* + xi = xi_a + i + x = a + bi + y = (a + bi)xi = (a + bi)(xi_a + i) + =(a * x_ia - b) + (a + b xi_a)i + */ + static void fp2_mul_xiC(Fp2T& y, const Fp2T& x) + { + const Fp& a = x.a; + const Fp& b = x.b; + Fp t; + Fp::mulUnit(t, a, xi_a_); + t -= b; + Fp::mulUnit(y.b, b, xi_a_); + y.b += a; + y.a = t; + } + /* + xi = 1 + i ; xi_a = 1 + y = (a + bi)xi = (a - b) + (a + b)i + */ + static void fp2_mul_xi_1_1i(Fp2T& y, const Fp2T& x) + { + const Fp& a = x.a; + const Fp& b = x.b; + Fp t; + Fp::add(t, a, b); + Fp::sub(y.a, a, b); + y.b = t; + } + /* + x = a + bi + 1 / x = (a - bi) / (a^2 + b^2) + */ + static void fp2_invW(Unit *y, const Unit *x) + { + const Fp *px = reinterpret_cast<const Fp*>(x); + Fp *py = reinterpret_cast<Fp*>(y); + const Fp& a = px[0]; + const Fp& b = px[1]; + Fp aa, bb; + Fp::sqr(aa, a); + Fp::sqr(bb, b); + aa += bb; + Fp::inv(aa, aa); // aa = 1 / (a^2 + b^2) + Fp::mul(py[0], a, aa); + Fp::mul(py[1], b, aa); + Fp::neg(py[1], py[1]); + } +}; + +template<class Fp_> void (*Fp2T<Fp_>::add)(Fp2T& z, const Fp2T& x, const Fp2T& y); +template<class Fp_> void (*Fp2T<Fp_>::sub)(Fp2T& z, const Fp2T& x, const Fp2T& y); +template<class Fp_> void (*Fp2T<Fp_>::neg)(Fp2T& y, const Fp2T& x); +template<class Fp_> void (*Fp2T<Fp_>::mul)(Fp2T& z, const Fp2T& x, const Fp2T& y); +template<class Fp_> void (*Fp2T<Fp_>::sqr)(Fp2T& y, const Fp2T& x); +template<class Fp_> void (*Fp2T<Fp_>::mul_xi)(Fp2T& y, const Fp2T& x); + +template<class Fp> +struct Fp2DblT { + typedef FpDblT<Fp> FpDbl; + typedef Fp2T<Fp> Fp2; + typedef fp::Unit Unit; + FpDbl a, b; + static void add(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) + { + FpDbl::add(z.a, x.a, y.a); + FpDbl::add(z.b, x.b, y.b); + } + static void addPre(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) + { + FpDbl::addPre(z.a, x.a, y.a); + FpDbl::addPre(z.b, x.b, y.b); + } + static void sub(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) + { + FpDbl::sub(z.a, x.a, y.a); + FpDbl::sub(z.b, x.b, y.b); + } + static void subPre(Fp2DblT& z, const Fp2DblT& x, const Fp2DblT& y) + { + FpDbl::subPre(z.a, x.a, y.a); + FpDbl::subPre(z.b, x.b, y.b); + } + static void neg(Fp2DblT& y, const Fp2DblT& x) + { + FpDbl::neg(y.a, x.a); + FpDbl::neg(y.b, x.b); + } + static void mul_xi(Fp2DblT& y, const Fp2DblT& x) + { + const uint32_t xi_a = Fp2::get_xi_a(); + if (xi_a == 1) { + FpDbl t; + FpDbl::add(t, x.a, x.b); + FpDbl::sub(y.a, x.a, x.b); + y.b = t; + } else { + FpDbl t; + FpDbl::mulUnit(t, x.a, xi_a); + FpDbl::sub(t, t, x.b); + FpDbl::mulUnit(y.b, x.b, xi_a); + FpDbl::add(y.b, y.b, x.a); + y.a = t; + } + } + static void (*mulPre)(Fp2DblT&, const Fp2&, const Fp2&); + static void (*sqrPre)(Fp2DblT&, const Fp2&); + static void mod(Fp2& y, const Fp2DblT& x) + { + FpDbl::mod(y.a, x.a); + FpDbl::mod(y.b, x.b); + } +#ifndef CYBOZU_DONT_USE_STRING + friend std::ostream& operator<<(std::ostream& os, const Fp2DblT& x) + { + return os << x.a << ' ' << x.b; + } +#endif + void operator+=(const Fp2DblT& x) { add(*this, *this, x); } + void operator-=(const Fp2DblT& x) { sub(*this, *this, x); } + 
static void init() + { + const mcl::fp::Op& op = Fp::getOp(); + if (op.fp2Dbl_mulPreA_) { + mulPre = (void (*)(Fp2DblT&, const Fp2&, const Fp2&))op.fp2Dbl_mulPreA_; + } else { + if (op.isFullBit) { + mulPre = fp2Dbl_mulPreW<true>; + } else { + mulPre = fp2Dbl_mulPreW<false>; + } + } + if (op.fp2Dbl_sqrPreA_) { + sqrPre = (void (*)(Fp2DblT&, const Fp2&))op.fp2Dbl_sqrPreA_; + } else { + if (op.isFullBit) { + sqrPre = fp2Dbl_sqrPreW<true>; + } else { + sqrPre = fp2Dbl_sqrPreW<false>; + } + } + } + /* + Fp2Dbl::mulPre by FpDblT + @note mod of NIST_P192 is fast + */ + template<bool isFullBit> + static void fp2Dbl_mulPreW(Fp2DblT& z, const Fp2& x, const Fp2& y) + { + const Fp& a = x.a; + const Fp& b = x.b; + const Fp& c = y.a; + const Fp& d = y.b; + FpDbl& d0 = z.a; + FpDbl& d1 = z.b; + FpDbl d2; + Fp s, t; + if (isFullBit) { + Fp::add(s, a, b); + Fp::add(t, c, d); + } else { + Fp::addPre(s, a, b); + Fp::addPre(t, c, d); + } + FpDbl::mulPre(d1, s, t); // (a + b)(c + d) + FpDbl::mulPre(d0, a, c); + FpDbl::mulPre(d2, b, d); + if (isFullBit) { + FpDbl::sub(d1, d1, d0); // (a + b)(c + d) - ac + FpDbl::sub(d1, d1, d2); // (a + b)(c + d) - ac - bd + } else { + FpDbl::subPre(d1, d1, d0); + FpDbl::subPre(d1, d1, d2); + } + FpDbl::sub(d0, d0, d2); // ac - bd + } + template<bool isFullBit> + static void fp2Dbl_sqrPreW(Fp2DblT& y, const Fp2& x) + { + Fp t1, t2; + if (isFullBit) { + Fp::add(t1, x.b, x.b); // 2b + Fp::add(t2, x.a, x.b); // a + b + } else { + Fp::addPre(t1, x.b, x.b); // 2b + Fp::addPre(t2, x.a, x.b); // a + b + } + FpDbl::mulPre(y.b, t1, x.a); // 2ab + Fp::sub(t1, x.a, x.b); // a - b + FpDbl::mulPre(y.a, t1, t2); // (a + b)(a - b) + } +}; + +template<class Fp> void (*Fp2DblT<Fp>::mulPre)(Fp2DblT&, const Fp2T<Fp>&, const Fp2T<Fp>&); +template<class Fp> void (*Fp2DblT<Fp>::sqrPre)(Fp2DblT&, const Fp2T<Fp>&); + +template<class Fp> uint32_t Fp2T<Fp>::xi_a_; +template<class Fp> Fp2T<Fp> Fp2T<Fp>::g[Fp2T<Fp>::gN]; +template<class Fp> Fp2T<Fp> Fp2T<Fp>::g2[Fp2T<Fp>::gN]; +template<class Fp> Fp2T<Fp> Fp2T<Fp>::g3[Fp2T<Fp>::gN]; + +/* + Fp6T = Fp2[v] / (v^3 - xi) + x = a + b v + c v^2 +*/ +template<class _Fp> +struct Fp6T : public fp::Serializable<Fp6T<_Fp>, + fp::Operator<Fp6T<_Fp> > > { + typedef _Fp Fp; + typedef Fp2T<Fp> Fp2; + typedef Fp2DblT<Fp> Fp2Dbl; + typedef Fp BaseFp; + Fp2 a, b, c; + Fp6T() { } + Fp6T(int64_t a) : a(a) , b(0) , c(0) { } + Fp6T(const Fp2& a, const Fp2& b, const Fp2& c) : a(a) , b(b) , c(c) { } + void clear() + { + a.clear(); + b.clear(); + c.clear(); + } + Fp* getFp0() { return a.getFp0(); } + const Fp* getFp0() const { return a.getFp0(); } + Fp2* getFp2() { return &a; } + const Fp2* getFp2() const { return &a; } + void set(const Fp2 &a_, const Fp2 &b_, const Fp2 &c_) + { + a = a_; + b = b_; + c = c_; + } + bool isZero() const + { + return a.isZero() && b.isZero() && c.isZero(); + } + bool isOne() const + { + return a.isOne() && b.isZero() && c.isZero(); + } + bool operator==(const Fp6T& rhs) const + { + return a == rhs.a && b == rhs.b && c == rhs.c; + } + bool operator!=(const Fp6T& rhs) const { return !operator==(rhs); } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode) + { + a.load(pb, is, ioMode); if (!*pb) return; + b.load(pb, is, ioMode); if (!*pb) return; + c.load(pb, is, ioMode); if (!*pb) return; + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode) const + { + const char sep = *fp::getIoSeparator(ioMode); + a.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if 
(!*pb) return; + } + b.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + c.save(pb, os, ioMode); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("Fp6T:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("Fp6T:save"); + } +#endif +#ifndef CYBOZU_DONT_USE_STRING + friend std::istream& operator>>(std::istream& is, Fp6T& self) + { + self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const Fp6T& self) + { + self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os)); + return os; + } +#endif + static void add(Fp6T& z, const Fp6T& x, const Fp6T& y) + { + Fp2::add(z.a, x.a, y.a); + Fp2::add(z.b, x.b, y.b); + Fp2::add(z.c, x.c, y.c); + } + static void sub(Fp6T& z, const Fp6T& x, const Fp6T& y) + { + Fp2::sub(z.a, x.a, y.a); + Fp2::sub(z.b, x.b, y.b); + Fp2::sub(z.c, x.c, y.c); + } + static void neg(Fp6T& y, const Fp6T& x) + { + Fp2::neg(y.a, x.a); + Fp2::neg(y.b, x.b); + Fp2::neg(y.c, x.c); + } + /* + x = a + bv + cv^2, v^3 = xi + x^2 = (a^2 + 2bc xi) + (c^2 xi + 2ab)v + (b^2 + 2ac)v^2 + + b^2 + 2ac = (a + b + c)^2 - a^2 - 2bc - c^2 - 2ab + */ + static void sqr(Fp6T& y, const Fp6T& x) + { + Fp2 t1, t2, t3; + Fp2::mul(t1, x.a, x.b); + t1 += t1; // 2ab + Fp2::mul(t2, x.b, x.c); + t2 += t2; // 2bc + Fp2::sqr(t3, x.c); // c^2 + Fp2::add(y.c, x.a, x.c); // a + c, destroy y.c + y.c += x.b; // a + b + c + Fp2::sqr(y.b, y.c); // (a + b + c)^2, destroy y.b + y.b -= t2; // (a + b + c)^2 - 2bc + Fp2::mul_xi(t2, t2); // 2bc xi + Fp2::sqr(y.a, x.a); // a^2, destroy y.a + y.b -= y.a; // (a + b + c)^2 - 2bc - a^2 + y.a += t2; // a^2 + 2bc xi + Fp2::sub(y.c, y.b, t3); // (a + b + c)^2 - 2bc - a^2 - c^2 + Fp2::mul_xi(y.b, t3); // c^2 xi + y.b += t1; // c^2 xi + 2ab + y.c -= t1; // b^2 + 2ac + } + /* + x = a + bv + cv^2, y = d + ev + fv^2, v^3 = xi + xy = (ad + (bf + ce)xi) + ((ae + bd) + cf xi)v + ((af + cd) + be)v^2 + bf + ce = (b + c)(e + f) - be - cf + ae + bd = (a + b)(e + d) - ad - be + af + cd = (a + c)(d + f) - ad - cf + */ + static void mul(Fp6T& z, const Fp6T& x, const Fp6T& y) + { +//clk.begin(); + const Fp2& a = x.a; + const Fp2& b = x.b; + const Fp2& c = x.c; + const Fp2& d = y.a; + const Fp2& e = y.b; + const Fp2& f = y.c; +#if 1 + Fp2Dbl AD, BE, CF; + Fp2Dbl::mulPre(AD, a, d); + Fp2Dbl::mulPre(BE, b, e); + Fp2Dbl::mulPre(CF, c, f); + + Fp2 t1, t2, t3, t4; + Fp2::add(t1, b, c); + Fp2::add(t2, e, f); + Fp2Dbl T1; + Fp2Dbl::mulPre(T1, t1, t2); + Fp2Dbl::sub(T1, T1, BE); + Fp2Dbl::sub(T1, T1, CF); + Fp2Dbl::mul_xi(T1, T1); + + Fp2::add(t2, a, b); + Fp2::add(t3, e, d); + Fp2Dbl T2; + Fp2Dbl::mulPre(T2, t2, t3); + Fp2Dbl::sub(T2, T2, AD); + Fp2Dbl::sub(T2, T2, BE); + + Fp2::add(t3, a, c); + Fp2::add(t4, d, f); + Fp2Dbl T3; + Fp2Dbl::mulPre(T3, t3, t4); + Fp2Dbl::sub(T3, T3, AD); + Fp2Dbl::sub(T3, T3, CF); + + Fp2Dbl::add(AD, AD, T1); + Fp2Dbl::mod(z.a, AD); + Fp2Dbl::mul_xi(CF, CF); + Fp2Dbl::add(CF, CF, T2); + Fp2Dbl::mod(z.b, CF); + Fp2Dbl::add(T3, T3, BE); + Fp2Dbl::mod(z.c, T3); +#else + Fp2 ad, be, cf; + Fp2::mul(ad, a, d); + Fp2::mul(be, b, e); + Fp2::mul(cf, c, f); + + Fp2 t1, t2, t3, t4; + Fp2::add(t1, b, c); + Fp2::add(t2, e, f); + t1 *= t2; + t1 -= be; + t1 -= cf; + Fp2::mul_xi(t1, t1); 
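// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored mcl sources. The step above
// computes bf + ce as (b + c)(e + f) - be - cf, and fp2Dbl_mulPreW earlier in
// this file uses the same Karatsuba-style identity
// ad + bc = (a + b)(c + d) - ac - bd, so a full Fp6 product needs only six
// Fp2 multiplications (AD, BE, CF and the three sum products) instead of the
// nine of the schoolbook method. A stand-alone integer check of the identity
// (illustration only):
#if 0
#include <assert.h>
#include <stdint.h>

int main()
{
	// x = a + bi, y = c + di with i^2 = -1; arbitrary small test values
	const int64_t a = 7, b = 11, c = 13, d = 17;
	const int64_t ac = a * c, bd = b * d;
	const int64_t re = ac - bd;                     // real part of xy
	const int64_t im = (a + b) * (c + d) - ac - bd; // imaginary part via the identity
	assert(re == a * c - b * d);
	assert(im == a * d + b * c);                    // ad + bc recovered from three products
	(void)re; (void)im;
	return 0;
}
#endif
// ---------------------------------------------------------------------------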
+ + Fp2::add(t2, a, b); + Fp2::add(t3, e, d); + t2 *= t3; + t2 -= ad; + t2 -= be; + + Fp2::add(t3, a, c); + Fp2::add(t4, d, f); + t3 *= t4; + t3 -= ad; + t3 -= cf; + + Fp2::add(z.a, ad, t1); + Fp2::mul_xi(z.b, cf); + z.b += t2; + Fp2::add(z.c, t3, be); +#endif +//clk.end(); + } + /* + x = a + bv + cv^2, v^3 = xi + y = 1/x = p/q where + p = (a^2 - bc xi) + (c^2 xi - ab)v + (b^2 - ac)v^2 + q = c^3 xi^2 + b(b^2 - 3ac)xi + a^3 + = (a^2 - bc xi)a + ((c^2 xi - ab)c + (b^2 - ac)b) xi + */ + static void inv(Fp6T& y, const Fp6T& x) + { + const Fp2& a = x.a; + const Fp2& b = x.b; + const Fp2& c = x.c; + Fp2 aa, bb, cc, ab, bc, ac; + Fp2::sqr(aa, a); + Fp2::sqr(bb, b); + Fp2::sqr(cc, c); + Fp2::mul(ab, a, b); + Fp2::mul(bc, b, c); + Fp2::mul(ac, c, a); + + Fp6T p; + Fp2::mul_xi(p.a, bc); + Fp2::sub(p.a, aa, p.a); // a^2 - bc xi + Fp2::mul_xi(p.b, cc); + p.b -= ab; // c^2 xi - ab + Fp2::sub(p.c, bb, ac); // b^2 - ac + Fp2 q, t; + Fp2::mul(q, p.b, c); + Fp2::mul(t, p.c, b); + q += t; + Fp2::mul_xi(q, q); + Fp2::mul(t, p.a, a); + q += t; + Fp2::inv(q, q); + + Fp2::mul(y.a, p.a, q); + Fp2::mul(y.b, p.b, q); + Fp2::mul(y.c, p.c, q); + } +}; + +/* + Fp12T = Fp6[w] / (w^2 - v) + x = a + b w +*/ +template<class Fp> +struct Fp12T : public fp::Serializable<Fp12T<Fp>, + fp::Operator<Fp12T<Fp> > > { + typedef Fp2T<Fp> Fp2; + typedef Fp6T<Fp> Fp6; + typedef Fp BaseFp; + Fp6 a, b; + Fp12T() {} + Fp12T(int64_t a) : a(a), b(0) {} + Fp12T(const Fp6& a, const Fp6& b) : a(a), b(b) {} + void clear() + { + a.clear(); + b.clear(); + } + void setOne() + { + clear(); + a.a.a = 1; + } + + Fp* getFp0() { return a.getFp0(); } + const Fp* getFp0() const { return a.getFp0(); } + Fp2* getFp2() { return a.getFp2(); } + const Fp2* getFp2() const { return a.getFp2(); } + void set(const Fp2& v0, const Fp2& v1, const Fp2& v2, const Fp2& v3, const Fp2& v4, const Fp2& v5) + { + a.set(v0, v1, v2); + b.set(v3, v4, v5); + } + + bool isZero() const + { + return a.isZero() && b.isZero(); + } + bool isOne() const + { + return a.isOne() && b.isZero(); + } + bool operator==(const Fp12T& rhs) const + { + return a == rhs.a && b == rhs.b; + } + bool operator!=(const Fp12T& rhs) const { return !operator==(rhs); } + static void add(Fp12T& z, const Fp12T& x, const Fp12T& y) + { + Fp6::add(z.a, x.a, y.a); + Fp6::add(z.b, x.b, y.b); + } + static void sub(Fp12T& z, const Fp12T& x, const Fp12T& y) + { + Fp6::sub(z.a, x.a, y.a); + Fp6::sub(z.b, x.b, y.b); + } + static void neg(Fp12T& z, const Fp12T& x) + { + Fp6::neg(z.a, x.a); + Fp6::neg(z.b, x.b); + } + /* + z = x v + y + in Fp6 : (a + bv + cv^2)v = cv^3 + av + bv^2 = cxi + av + bv^2 + */ + static void mulVadd(Fp6& z, const Fp6& x, const Fp6& y) + { + Fp2 t; + Fp2::mul_xi(t, x.c); + Fp2::add(z.c, x.b, y.c); + Fp2::add(z.b, x.a, y.b); + Fp2::add(z.a, t, y.a); + } + /* + x = a + bw, y = c + dw, w^2 = v + z = xy = (a + bw)(c + dw) = (ac + bdv) + (ad + bc)w + ad+bc = (a + b)(c + d) - ac - bd + + in Fp6 : (a + bv + cv^2)v = cv^3 + av + bv^2 = cxi + av + bv^2 + */ + static void mul(Fp12T& z, const Fp12T& x, const Fp12T& y) + { + const Fp6& a = x.a; + const Fp6& b = x.b; + const Fp6& c = y.a; + const Fp6& d = y.b; + Fp6 t1, t2, ac, bd; + Fp6::add(t1, a, b); + Fp6::add(t2, c, d); + t1 *= t2; // (a + b)(c + d) + Fp6::mul(ac, a, c); + Fp6::mul(bd, b, d); + mulVadd(z.a, bd, ac); + t1 -= ac; + Fp6::sub(z.b, t1, bd); + } + /* + x = a + bw, w^2 = v + y = x^2 = (a + bw)^2 = (a^2 + b^2v) + 2abw + a^2 + b^2v = (a + b)(bv + a) - (abv + ab) + */ + static void sqr(Fp12T& y, const Fp12T& x) + { + const Fp6& a = x.a; + const 
Fp6& b = x.b; + Fp6 t0, t1; + Fp6::add(t0, a, b); // a + b + mulVadd(t1, b, a); // bv + a + t0 *= t1; // (a + b)(bv + a) + Fp6::mul(t1, a, b); // ab + Fp6::add(y.b, t1, t1); // 2ab + mulVadd(y.a, t1, t1); // abv + ab + Fp6::sub(y.a, t0, y.a); + } + /* + x = a + bw, w^2 = v + y = 1/x = (a - bw) / (a^2 - b^2v) + */ + static void inv(Fp12T& y, const Fp12T& x) + { + const Fp6& a = x.a; + const Fp6& b = x.b; + Fp6 t0, t1; + Fp6::sqr(t0, a); + Fp6::sqr(t1, b); + Fp2::mul_xi(t1.c, t1.c); + t0.a -= t1.c; + t0.b -= t1.a; + t0.c -= t1.b; // t0 = a^2 - b^2v + Fp6::inv(t0, t0); + Fp6::mul(y.a, x.a, t0); + Fp6::mul(y.b, x.b, t0); + Fp6::neg(y.b, y.b); + } + /* + y = 1 / x = conjugate of x if |x| = 1 + */ + static void unitaryInv(Fp12T& y, const Fp12T& x) + { + if (&y != &x) y.a = x.a; + Fp6::neg(y.b, x.b); + } + /* + Frobenius + i^2 = -1 + (a + bi)^p = a + bi^p in Fp + = a + bi if p = 1 mod 4 + = a - bi if p = 3 mod 4 + + g = xi^(p - 1) / 6 + v^3 = xi in Fp2 + v^p = ((v^6) ^ (p-1)/6) v = g^2 v + v^2p = g^4 v^2 + (a + bv + cv^2)^p in Fp6 + = F(a) + F(b)g^2 v + F(c) g^4 v^2 + + w^p = ((w^6) ^ (p-1)/6) w = g w + ((a + bv + cv^2)w)^p in Fp12T + = (F(a) g + F(b) g^3 v + F(c) g^5 v^2)w + */ + static void Frobenius(Fp12T& y, const Fp12T& x) + { + for (int i = 0; i < 6; i++) { + Fp2::Frobenius(y.getFp2()[i], x.getFp2()[i]); + } + for (int i = 1; i < 6; i++) { + y.getFp2()[i] *= Fp2::get_gTbl()[i - 1]; + } + } + static void Frobenius2(Fp12T& y, const Fp12T& x) + { +#if 0 + Frobenius(y, x); + Frobenius(y, y); +#else + y.getFp2()[0] = x.getFp2()[0]; + if (Fp::getOp().pmod4 == 1) { + for (int i = 1; i < 6; i++) { + Fp2::mul(y.getFp2()[i], x.getFp2()[i], Fp2::get_g2Tbl()[i]); + } + } else { + for (int i = 1; i < 6; i++) { + Fp2::mulFp(y.getFp2()[i], x.getFp2()[i], Fp2::get_g2Tbl()[i - 1].a); + } + } +#endif + } + static void Frobenius3(Fp12T& y, const Fp12T& x) + { +#if 0 + Frobenius(y, x); + Frobenius(y, y); + Frobenius(y, y); +#else + Fp2::Frobenius(y.getFp2()[0], x.getFp2()[0]); + for (int i = 1; i < 6; i++) { + Fp2::Frobenius(y.getFp2()[i], x.getFp2()[i]); + y.getFp2()[i] *= Fp2::get_g3Tbl()[i - 1]; + } +#endif + } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode) + { + a.load(pb, is, ioMode); if (!*pb) return; + b.load(pb, is, ioMode); + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode) const + { + const char sep = *fp::getIoSeparator(ioMode); + a.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + b.save(pb, os, ioMode); + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("Fp12T:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("Fp12T:save"); + } +#endif +#ifndef CYBOZU_DONT_USE_STRING + friend std::istream& operator>>(std::istream& is, Fp12T& self) + { + self.load(is, fp::detectIoMode(Fp::BaseFp::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const Fp12T& self) + { + self.save(os, fp::detectIoMode(Fp::BaseFp::getIoMode(), os)); + return os; + } +#endif +}; + +/* + convert multiplicative group to additive group +*/ +template<class T> +struct GroupMtoA : public T { + static T& castT(GroupMtoA& x) { return static_cast<T&>(x); } + static const T& castT(const GroupMtoA& x) { return 
static_cast<const T&>(x); } + void clear() + { + castT(*this) = 1; + } + bool isZero() const { return castT(*this).isOne(); } + static void add(GroupMtoA& z, const GroupMtoA& x, const GroupMtoA& y) + { + T::mul(castT(z), castT(x), castT(y)); + } + static void dbl(GroupMtoA& y, const GroupMtoA& x) + { + T::sqr(castT(y), castT(x)); + } + static void neg(GroupMtoA& y, const GroupMtoA& x) + { + // assume Fp12 + T::unitaryInv(castT(y), castT(x)); + } + static void Frobenus(GroupMtoA& y, const GroupMtoA& x) + { + T::Frobenius(castT(y), castT(x)); + } + template<class INT> + static void mul(GroupMtoA& z, const GroupMtoA& x, const INT& y) + { + T::pow(castT(z), castT(x), y); + } + template<class INT> + static void mulGeneric(GroupMtoA& z, const GroupMtoA& x, const INT& y) + { + T::powGeneric(castT(z), castT(x), y); + } + void operator+=(const GroupMtoA& rhs) + { + add(*this, *this, rhs); + } + void normalize() {} +private: + bool isOne() const; +}; + +} // mcl + diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/gmp_util.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/gmp_util.hpp new file mode 100644 index 000000000..2dd71eb50 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/gmp_util.hpp @@ -0,0 +1,861 @@ +#pragma once +/** + @file + @brief util function for gmp + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <stdio.h> +#include <stdlib.h> +#include <assert.h> +#include <stdint.h> +#include <cybozu/exception.hpp> +#include <mcl/randgen.hpp> +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4616) + #pragma warning(disable : 4800) + #pragma warning(disable : 4244) + #pragma warning(disable : 4127) + #pragma warning(disable : 4512) + #pragma warning(disable : 4146) +#endif +#if defined(__EMSCRIPTEN__) || defined(__wasm__) + #define MCL_USE_VINT +#endif +#ifdef MCL_USE_VINT +#include <mcl/vint.hpp> +typedef mcl::Vint mpz_class; +#else +#include <gmpxx.h> +#ifdef _MSC_VER + #pragma warning(pop) + #include <cybozu/link_mpir.hpp> +#endif +#endif + +#ifndef MCL_SIZEOF_UNIT + #if defined(CYBOZU_OS_BIT) && (CYBOZU_OS_BIT == 32) + #define MCL_SIZEOF_UNIT 4 + #else + #define MCL_SIZEOF_UNIT 8 + #endif +#endif + +namespace mcl { + +namespace fp { + +#if MCL_SIZEOF_UNIT == 8 +typedef uint64_t Unit; +#else +typedef uint32_t Unit; +#endif +#define MCL_UNIT_BIT_SIZE (MCL_SIZEOF_UNIT * 8) + +} // mcl::fp + +namespace gmp { + +typedef mpz_class ImplType; + +// z = [buf[n-1]:..:buf[1]:buf[0]] +// eg. 
buf[] = {0x12345678, 0xaabbccdd}; => z = 0xaabbccdd12345678; +template<class T> +void setArray(bool *pb, mpz_class& z, const T *buf, size_t n) +{ +#ifdef MCL_USE_VINT + z.setArray(pb, buf, n); +#else + mpz_import(z.get_mpz_t(), n, -1, sizeof(*buf), 0, 0, buf); + *pb = true; +#endif +} +/* + buf[0, size) = x + buf[size, maxSize) with zero +*/ +template<class T, class U> +bool getArray_(T *buf, size_t maxSize, const U *x, int xn)//const mpz_srcptr x) +{ + const size_t bufByteSize = sizeof(T) * maxSize; + if (xn < 0) return false; + size_t xByteSize = sizeof(*x) * xn; + if (xByteSize > bufByteSize) return false; + memcpy(buf, x, xByteSize); + memset((char*)buf + xByteSize, 0, bufByteSize - xByteSize); + return true; +} +template<class T> +void getArray(bool *pb, T *buf, size_t maxSize, const mpz_class& x) +{ +#ifdef MCL_USE_VINT + *pb = getArray_(buf, maxSize, x.getUnit(), x.getUnitSize()); +#else + *pb = getArray_(buf, maxSize, x.get_mpz_t()->_mp_d, x.get_mpz_t()->_mp_size); +#endif +} +inline void set(mpz_class& z, uint64_t x) +{ + bool b; + setArray(&b, z, &x, 1); + assert(b); + (void)b; +} +inline void setStr(bool *pb, mpz_class& z, const char *str, int base = 0) +{ +#ifdef MCL_USE_VINT + z.setStr(pb, str, base); +#else + *pb = z.set_str(str, base) == 0; +#endif +} + +/* + set buf with string terminated by '\0' + return strlen(buf) if success else 0 +*/ +inline size_t getStr(char *buf, size_t bufSize, const mpz_class& z, int base = 10) +{ +#ifdef MCL_USE_VINT + return z.getStr(buf, bufSize, base); +#else + __gmp_alloc_cstring tmp(mpz_get_str(0, base, z.get_mpz_t())); + size_t n = strlen(tmp.str); + if (n + 1 > bufSize) return 0; + memcpy(buf, tmp.str, n + 1); + return n; +#endif +} + +#ifndef CYBOZU_DONT_USE_STRING +inline void getStr(std::string& str, const mpz_class& z, int base = 10) +{ +#ifdef MCL_USE_VINT + z.getStr(str, base); +#else + str = z.get_str(base); +#endif +} +inline std::string getStr(const mpz_class& z, int base = 10) +{ + std::string s; + gmp::getStr(s, z, base); + return s; +} +#endif + +inline void add(mpz_class& z, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::add(z, x, y); +#else + mpz_add(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +#ifndef MCL_USE_VINT +inline void add(mpz_class& z, const mpz_class& x, unsigned int y) +{ + mpz_add_ui(z.get_mpz_t(), x.get_mpz_t(), y); +} +inline void sub(mpz_class& z, const mpz_class& x, unsigned int y) +{ + mpz_sub_ui(z.get_mpz_t(), x.get_mpz_t(), y); +} +inline void mul(mpz_class& z, const mpz_class& x, unsigned int y) +{ + mpz_mul_ui(z.get_mpz_t(), x.get_mpz_t(), y); +} +inline void div(mpz_class& q, const mpz_class& x, unsigned int y) +{ + mpz_div_ui(q.get_mpz_t(), x.get_mpz_t(), y); +} +inline void mod(mpz_class& r, const mpz_class& x, unsigned int m) +{ + mpz_mod_ui(r.get_mpz_t(), x.get_mpz_t(), m); +} +inline int compare(const mpz_class& x, int y) +{ + return mpz_cmp_si(x.get_mpz_t(), y); +} +#endif +inline void sub(mpz_class& z, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::sub(z, x, y); +#else + mpz_sub(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline void mul(mpz_class& z, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::mul(z, x, y); +#else + mpz_mul(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline void sqr(mpz_class& z, const mpz_class& x) +{ +#ifdef MCL_USE_VINT + Vint::mul(z, x, x); +#else + mpz_mul(z.get_mpz_t(), x.get_mpz_t(), x.get_mpz_t()); +#endif +} +inline void divmod(mpz_class& q, mpz_class& 
r, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::divMod(&q, r, x, y); +#else + mpz_divmod(q.get_mpz_t(), r.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline void div(mpz_class& q, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::div(q, x, y); +#else + mpz_div(q.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline void mod(mpz_class& r, const mpz_class& x, const mpz_class& m) +{ +#ifdef MCL_USE_VINT + Vint::mod(r, x, m); +#else + mpz_mod(r.get_mpz_t(), x.get_mpz_t(), m.get_mpz_t()); +#endif +} +inline void clear(mpz_class& z) +{ +#ifdef MCL_USE_VINT + z.clear(); +#else + mpz_set_ui(z.get_mpz_t(), 0); +#endif +} +inline bool isZero(const mpz_class& z) +{ +#ifdef MCL_USE_VINT + return z.isZero(); +#else + return mpz_sgn(z.get_mpz_t()) == 0; +#endif +} +inline bool isNegative(const mpz_class& z) +{ +#ifdef MCL_USE_VINT + return z.isNegative(); +#else + return mpz_sgn(z.get_mpz_t()) < 0; +#endif +} +inline void neg(mpz_class& z, const mpz_class& x) +{ +#ifdef MCL_USE_VINT + Vint::neg(z, x); +#else + mpz_neg(z.get_mpz_t(), x.get_mpz_t()); +#endif +} +inline int compare(const mpz_class& x, const mpz_class & y) +{ +#ifdef MCL_USE_VINT + return Vint::compare(x, y); +#else + return mpz_cmp(x.get_mpz_t(), y.get_mpz_t()); +#endif +} +template<class T> +void addMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m) +{ + add(z, x, y); + if (compare(z, m) >= 0) { + sub(z, z, m); + } +} +template<class T> +void subMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m) +{ + sub(z, x, y); + if (!isNegative(z)) return; + add(z, z, m); +} +template<class T> +void mulMod(mpz_class& z, const mpz_class& x, const T& y, const mpz_class& m) +{ + mul(z, x, y); + mod(z, z, m); +} +inline void sqrMod(mpz_class& z, const mpz_class& x, const mpz_class& m) +{ + sqr(z, x); + mod(z, z, m); +} +// z = x^y (y >= 0) +inline void pow(mpz_class& z, const mpz_class& x, unsigned int y) +{ +#ifdef MCL_USE_VINT + Vint::pow(z, x, y); +#else + mpz_pow_ui(z.get_mpz_t(), x.get_mpz_t(), y); +#endif +} +// z = x^y mod m (y >=0) +inline void powMod(mpz_class& z, const mpz_class& x, const mpz_class& y, const mpz_class& m) +{ +#ifdef MCL_USE_VINT + Vint::powMod(z, x, y, m); +#else + mpz_powm(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t(), m.get_mpz_t()); +#endif +} +// z = 1/x mod m +inline void invMod(mpz_class& z, const mpz_class& x, const mpz_class& m) +{ +#ifdef MCL_USE_VINT + Vint::invMod(z, x, m); +#else + mpz_invert(z.get_mpz_t(), x.get_mpz_t(), m.get_mpz_t()); +#endif +} +// z = lcm(x, y) +inline void lcm(mpz_class& z, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::lcm(z, x, y); +#else + mpz_lcm(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline mpz_class lcm(const mpz_class& x, const mpz_class& y) +{ + mpz_class z; + lcm(z, x, y); + return z; +} +// z = gcd(x, y) +inline void gcd(mpz_class& z, const mpz_class& x, const mpz_class& y) +{ +#ifdef MCL_USE_VINT + Vint::gcd(z, x, y); +#else + mpz_gcd(z.get_mpz_t(), x.get_mpz_t(), y.get_mpz_t()); +#endif +} +inline mpz_class gcd(const mpz_class& x, const mpz_class& y) +{ + mpz_class z; + gcd(z, x, y); + return z; +} +/* + assume p : odd prime + return 1 if x^2 = a mod p for some x + return -1 if x^2 != a mod p for any x +*/ +inline int legendre(const mpz_class& a, const mpz_class& p) +{ +#ifdef MCL_USE_VINT + return Vint::jacobi(a, p); +#else + return mpz_legendre(a.get_mpz_t(), p.get_mpz_t()); +#endif +} +inline bool isPrime(bool *pb, const mpz_class& 
x) +{ +#ifdef MCL_USE_VINT + return x.isPrime(pb, 32); +#else + *pb = true; + return mpz_probab_prime_p(x.get_mpz_t(), 32) != 0; +#endif +} +inline size_t getBitSize(const mpz_class& x) +{ +#ifdef MCL_USE_VINT + return x.getBitSize(); +#else + return mpz_sizeinbase(x.get_mpz_t(), 2); +#endif +} +inline bool testBit(const mpz_class& x, size_t pos) +{ +#ifdef MCL_USE_VINT + return x.testBit(pos); +#else + return mpz_tstbit(x.get_mpz_t(), pos) != 0; +#endif +} +inline void resetBit(mpz_class& x, size_t pos) +{ +#ifdef MCL_USE_VINT + x.setBit(pos, false); +#else + mpz_clrbit(x.get_mpz_t(), pos); +#endif +} +inline void setBit(mpz_class& x, size_t pos, bool v = true) +{ +#ifdef MCL_USE_VINT + x.setBit(pos, v); +#else + if (v) { + mpz_setbit(x.get_mpz_t(), pos); + } else { + resetBit(x, pos); + } +#endif +} +inline const fp::Unit *getUnit(const mpz_class& x) +{ +#ifdef MCL_USE_VINT + return x.getUnit(); +#else + return reinterpret_cast<const fp::Unit*>(x.get_mpz_t()->_mp_d); +#endif +} +inline fp::Unit getUnit(const mpz_class& x, size_t i) +{ + return getUnit(x)[i]; +} +inline size_t getUnitSize(const mpz_class& x) +{ +#ifdef MCL_USE_VINT + return x.getUnitSize(); +#else + return std::abs(x.get_mpz_t()->_mp_size); +#endif +} +inline mpz_class abs(const mpz_class& x) +{ +#ifdef MCL_USE_VINT + return Vint::abs(x); +#else + return ::abs(x); +#endif +} + +inline void getRand(bool *pb, mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen()) +{ + if (rg.isZero()) rg = fp::RandGen::get(); + assert(bitSize > 1); + const size_t rem = bitSize & 31; + const size_t n = (bitSize + 31) / 32; + uint32_t buf[128]; + assert(n <= CYBOZU_NUM_OF_ARRAY(buf)); + if (n > CYBOZU_NUM_OF_ARRAY(buf)) { + *pb = false; + return; + } + rg.read(buf, n * sizeof(buf[0])); + uint32_t v = buf[n - 1]; + if (rem == 0) { + v |= 1U << 31; + } else { + v &= (1U << rem) - 1; + v |= 1U << (rem - 1); + } + buf[n - 1] = v; + setArray(pb, z, buf, n); +} + +inline void getRandPrime(bool *pb, mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen(), bool setSecondBit = false, bool mustBe3mod4 = false) +{ + if (rg.isZero()) rg = fp::RandGen::get(); + assert(bitSize > 2); + for (;;) { + getRand(pb, z, bitSize, rg); + if (!*pb) return; + if (setSecondBit) { + z |= mpz_class(1) << (bitSize - 2); + } + if (mustBe3mod4) { + z |= 3; + } + bool ret = isPrime(pb, z); + if (!*pb) return; + if (ret) return; + } +} +inline mpz_class getQuadraticNonResidue(const mpz_class& p) +{ + mpz_class g = 2; + while (legendre(g, p) > 0) { + ++g; + } + return g; +} + +namespace impl { + +template<class Vec> +void convertToBinary(Vec& v, const mpz_class& x) +{ + const size_t len = gmp::getBitSize(x); + v.resize(len); + for (size_t i = 0; i < len; i++) { + v[i] = gmp::testBit(x, len - 1 - i) ? 
1 : 0; + } +} + +template<class Vec> +size_t getContinuousVal(const Vec& v, size_t pos, int val) +{ + while (pos >= 2) { + if (v[pos] != val) break; + pos--; + } + return pos; +} + +template<class Vec> +void convertToNAF(Vec& v, const Vec& in) +{ + v.copy(in); + size_t pos = v.size() - 1; + for (;;) { + size_t p = getContinuousVal(v, pos, 0); + if (p == 1) return; + assert(v[p] == 1); + size_t q = getContinuousVal(v, p, 1); + if (q == 1) return; + assert(v[q] == 0); + if (p - q <= 1) { + pos = p - 1; + continue; + } + v[q] = 1; + for (size_t i = q + 1; i < p; i++) { + v[i] = 0; + } + v[p] = -1; + pos = q; + } +} + +template<class Vec> +size_t getNumOfNonZeroElement(const Vec& v) +{ + size_t w = 0; + for (size_t i = 0; i < v.size(); i++) { + if (v[i]) w++; + } + return w; +} + +} // impl + +/* + compute a repl of x which has smaller Hamming weights. + return true if naf is selected +*/ +template<class Vec> +bool getNAF(Vec& v, const mpz_class& x) +{ + Vec bin; + impl::convertToBinary(bin, x); + Vec naf; + impl::convertToNAF(naf, bin); + const size_t binW = impl::getNumOfNonZeroElement(bin); + const size_t nafW = impl::getNumOfNonZeroElement(naf); + if (nafW < binW) { + v.swap(naf); + return true; + } else { + v.swap(bin); + return false; + } +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +inline void setStr(mpz_class& z, const std::string& str, int base = 0) +{ + bool b; + setStr(&b, z, str.c_str(), base); + if (!b) throw cybozu::Exception("gmp:setStr"); +} +template<class T> +void setArray(mpz_class& z, const T *buf, size_t n) +{ + bool b; + setArray(&b, z, buf, n); + if (!b) throw cybozu::Exception("gmp:setArray"); +} +template<class T> +void getArray(T *buf, size_t maxSize, const mpz_class& x) +{ + bool b; + getArray(&b, buf, maxSize, x); + if (!b) throw cybozu::Exception("gmp:getArray"); +} +inline bool isPrime(const mpz_class& x) +{ + bool b; + bool ret = isPrime(&b, x); + if (!b) throw cybozu::Exception("gmp:isPrime"); + return ret; +} +inline void getRand(mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen()) +{ + bool b; + getRand(&b, z, bitSize, rg); + if (!b) throw cybozu::Exception("gmp:getRand"); +} +inline void getRandPrime(mpz_class& z, size_t bitSize, fp::RandGen rg = fp::RandGen(), bool setSecondBit = false, bool mustBe3mod4 = false) +{ + bool b; + getRandPrime(&b, z, bitSize, rg, setSecondBit, mustBe3mod4); + if (!b) throw cybozu::Exception("gmp:getRandPrime"); +} +#endif + + +} // mcl::gmp + +/* + Tonelli-Shanks +*/ +class SquareRoot { + bool isPrecomputed_; + bool isPrime; + mpz_class p; + mpz_class g; + int r; + mpz_class q; // p - 1 = 2^r q + mpz_class s; // s = g^q + mpz_class q_add_1_div_2; + struct Tbl { + const char *p; + const char *g; + int r; + const char *q; + const char *s; + const char *q_add_1_div_2; + }; + bool setIfPrecomputed(const mpz_class& p_) + { + static const Tbl tbl[] = { + { // BN254.p + "2523648240000001ba344d80000000086121000000000013a700000000000013", + "2", + 1, + "1291b24120000000dd1a26c0000000043090800000000009d380000000000009", + "2523648240000001ba344d80000000086121000000000013a700000000000012", + "948d920900000006e8d1360000000021848400000000004e9c0000000000005", + }, + { // BN254.r + "2523648240000001ba344d8000000007ff9f800000000010a10000000000000d", + "2", + 2, + "948d920900000006e8d136000000001ffe7e000000000042840000000000003", + "9366c4800000000555150000000000122400000000000015", + "4a46c9048000000374689b000000000fff3f000000000021420000000000002", + }, + { // BLS12_381,p + 
"1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", + "2", + 1, + "d0088f51cbff34d258dd3db21a5d66bb23ba5c279c2895fb39869507b587b120f55ffff58a9ffffdcff7fffffffd555", + "1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaaa", + "680447a8e5ff9a692c6e9ed90d2eb35d91dd2e13ce144afd9cc34a83dac3d8907aaffffac54ffffee7fbfffffffeaab", + }, + { // BLS12_381.r + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", + "5", + 32, + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff", + "212d79e5b416b6f0fd56dc8d168d6c0c4024ff270b3e0941b788f500b912f1f", + "39f6d3a994cebea4199cec0404d0ec02a9ded2017fff2dff80000000", + }, + }; + for (size_t i = 0; i < CYBOZU_NUM_OF_ARRAY(tbl); i++) { + mpz_class targetPrime; + bool b; + mcl::gmp::setStr(&b, targetPrime, tbl[i].p, 16); + if (!b) continue; + if (targetPrime != p_) continue; + isPrime = true; + p = p_; + mcl::gmp::setStr(&b, g, tbl[i].g, 16); + if (!b) continue; + r = tbl[i].r; + mcl::gmp::setStr(&b, q, tbl[i].q, 16); + if (!b) continue; + mcl::gmp::setStr(&b, s, tbl[i].s, 16); + if (!b) continue; + mcl::gmp::setStr(&b, q_add_1_div_2, tbl[i].q_add_1_div_2, 16); + if (!b) continue; + isPrecomputed_ = true; + return true; + } + return false; + } +public: + SquareRoot() { clear(); } + bool isPrecomputed() const { return isPrecomputed_; } + void clear() + { + isPrecomputed_ = false; + isPrime = false; + p = 0; + g = 0; + r = 0; + q = 0; + s = 0; + q_add_1_div_2 = 0; + } +#if !defined(CYBOZU_DONT_USE_USE_STRING) && !defined(CYBOZU_DONT_USE_EXCEPTION) + void dump() const + { + printf("\"%s\",\n", mcl::gmp::getStr(p, 16).c_str()); + printf("\"%s\",\n", mcl::gmp::getStr(g, 16).c_str()); + printf("%d,\n", r); + printf("\"%s\",\n", mcl::gmp::getStr(q, 16).c_str()); + printf("\"%s\",\n", mcl::gmp::getStr(s, 16).c_str()); + printf("\"%s\",\n", mcl::gmp::getStr(q_add_1_div_2, 16).c_str()); + } +#endif + void set(bool *pb, const mpz_class& _p, bool usePrecomputedTable = true) + { + if (usePrecomputedTable && setIfPrecomputed(_p)) { + *pb = true; + return; + } + p = _p; + if (p <= 2) { + *pb = false; + return; + } + isPrime = gmp::isPrime(pb, p); + if (!*pb) return; + if (!isPrime) { + *pb = false; + return; + } + g = gmp::getQuadraticNonResidue(p); + // p - 1 = 2^r q, q is odd + r = 0; + q = p - 1; + while ((q & 1) == 0) { + r++; + q /= 2; + } + gmp::powMod(s, g, q, p); + q_add_1_div_2 = (q + 1) / 2; + *pb = true; + } + /* + solve x^2 = a mod p + */ + bool get(mpz_class& x, const mpz_class& a) const + { + if (!isPrime) { + return false; + } + if (a == 0) { + x = 0; + return true; + } + if (gmp::legendre(a, p) < 0) return false; + if (r == 1) { + // (p + 1) / 4 = (q + 1) / 2 + gmp::powMod(x, a, q_add_1_div_2, p); + return true; + } + mpz_class c = s, d; + int e = r; + gmp::powMod(d, a, q, p); + gmp::powMod(x, a, q_add_1_div_2, p); // destroy a if &x == &a + mpz_class dd; + mpz_class b; + while (d != 1) { + int i = 1; + dd = d * d; dd %= p; + while (dd != 1) { + dd *= dd; dd %= p; + i++; + } + b = 1; + b <<= e - i - 1; + gmp::powMod(b, c, b, p); + x *= b; x %= p; + c = b * b; c %= p; + d *= c; d %= p; + e = i; + } + return true; + } + /* + solve x^2 = a in Fp + */ + template<class Fp> + bool get(Fp& x, const Fp& a) const + { + assert(Fp::getOp().mp == p); + if (a == 0) { + x = 0; + return true; + } + { + bool b; + mpz_class aa; + a.getMpz(&b, aa); + assert(b); + if (gmp::legendre(aa, p) < 0) return false; + } + if (r == 1) { + // (p + 1) / 4 = (q + 1) / 2 + Fp::pow(x, 
a, q_add_1_div_2); + return true; + } + Fp c, d; + { + bool b; + c.setMpz(&b, s); + assert(b); + } + int e = r; + Fp::pow(d, a, q); + Fp::pow(x, a, q_add_1_div_2); // destroy a if &x == &a + Fp dd; + Fp b; + while (!d.isOne()) { + int i = 1; + Fp::sqr(dd, d); + while (!dd.isOne()) { + dd *= dd; + i++; + } + b = 1; +// b <<= e - i - 1; + for (int j = 0; j < e - i - 1; j++) { + b += b; + } + Fp::pow(b, c, b); + x *= b; + Fp::sqr(c, b); + d *= c; + e = i; + } + return true; + } + bool operator==(const SquareRoot& rhs) const + { + return isPrime == rhs.isPrime && p == rhs.p && g == rhs.g && r == rhs.r + && q == rhs.q && s == rhs.s && q_add_1_div_2 == rhs.q_add_1_div_2; + } + bool operator!=(const SquareRoot& rhs) const { return !operator==(rhs); } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void set(const mpz_class& _p) + { + bool b; + set(&b, _p); + if (!b) throw cybozu::Exception("gmp:SquareRoot:set"); + } +#endif +}; + +} // mcl diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/lagrange.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/lagrange.hpp new file mode 100644 index 000000000..7c0218896 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/lagrange.hpp @@ -0,0 +1,97 @@ +#pragma once +/** + @file + @brief Lagrange Interpolation + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +namespace mcl { + +/* + recover out = f(0) by { (x, y) | x = S[i], y = f(x) = vec[i] } + @retval 0 if succeed else -1 +*/ +template<class G, class F> +void LagrangeInterpolation(bool *pb, G& out, const F *S, const G *vec, size_t k) +{ + /* + delta_{i,S}(0) = prod_{j != i} S[j] / (S[j] - S[i]) = a / b + where a = prod S[j], b = S[i] * prod_{j != i} (S[j] - S[i]) + */ + if (k < 2) { + *pb = false; + return; + } + F a = S[0]; + for (size_t i = 1; i < k; i++) { + a *= S[i]; + } + if (a.isZero()) { + *pb = false; + return; + } + /* + f(0) = sum_i f(S[i]) delta_{i,S}(0) + */ + G r; + r.clear(); + for (size_t i = 0; i < k; i++) { + F b = S[i]; + for (size_t j = 0; j < k; j++) { + if (j != i) { + F v = S[j] - S[i]; + if (v.isZero()) { + *pb = false; + return; + } + b *= v; + } + } + G t; + G::mul(t, vec[i], a / b); + r += t; + } + out = r; + *pb = true; +} + +/* + out = f(x) = c[0] + c[1] * x + c[2] * x^2 + ... 
+ c[cSize - 1] * x^(cSize - 1) + @retval 0 if succeed else -1 +*/ +template<class G, class T> +void evaluatePolynomial(bool *pb, G& out, const G *c, size_t cSize, const T& x) +{ + if (cSize < 2) { + *pb = false; + return; + } + G y = c[cSize - 1]; + for (int i = (int)cSize - 2; i >= 0; i--) { + G::mul(y, y, x); + G::add(y, y, c[i]); + } + out = y; + *pb = true; +} + +#ifndef CYBOZU_DONT_USE_EXCEPTION +template<class G, class F> +void LagrangeInterpolation(G& out, const F *S, const G *vec, size_t k) +{ + bool b; + LagrangeInterpolation(&b, out, S, vec, k); + if (!b) throw cybozu::Exception("LagrangeInterpolation"); +} + +template<class G, class T> +void evaluatePolynomial(G& out, const G *c, size_t cSize, const T& x) +{ + bool b; + evaluatePolynomial(&b, out, c, cSize, x); + if (!b) throw cybozu::Exception("evaluatePolynomial"); +} +#endif + +} // mcl diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/op.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/op.hpp new file mode 100644 index 000000000..d108d1a55 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/op.hpp @@ -0,0 +1,381 @@ +#pragma once +/** + @file + @brief definition of Op + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <mcl/gmp_util.hpp> +#include <memory.h> +#include <mcl/array.hpp> + +#ifndef MCL_MAX_BIT_SIZE + #define MCL_MAX_BIT_SIZE 521 +#endif +#if defined(__EMSCRIPTEN__) || defined(__wasm__) + #define MCL_DONT_USE_XBYAK + #define MCL_DONT_USE_OPENSSL +#endif +#if !defined(MCL_DONT_USE_XBYAK) && (defined(_WIN64) || defined(__x86_64__)) && (MCL_SIZEOF_UNIT == 8) + #define MCL_USE_XBYAK + #define MCL_XBYAK_DIRECT_CALL +#endif + +#define MCL_MAX_HASH_BIT_SIZE 512 + +namespace mcl { + +/* + specifies available string format mode for X::setIoMode() + // for Fp, Fp2, Fp6, Fp12 + default(0) : IoDec + printable string(zero terminated, variable size) + IoBin(2) | IoDec(10) | IoHex(16) | IoBinPrefix | IoHexPrefix + + byte string(not zero terminated, fixed size) + IoArray | IoArrayRaw + IoArray = IoSerialize + + // for Ec + affine(0) | IoEcCompY | IoComp + default : affine + + affine and IoEcCompY are available with ioMode for Fp + IoSerialize ignores ioMode for Fp + + IoAuto + dec or hex according to ios_base::fmtflags + IoBin + binary number([01]+) + IoDec + decimal number + IoHex + hexadecimal number([0-9a-fA-F]+) + IoBinPrefix + 0b + <binary number> + IoHexPrefix + 0x + <hexadecimal number> + IoArray + array of Unit(fixed size = Fp::getByteSize()) + IoArrayRaw + array of Unit(fixed size = Fp::getByteSize()) without Montgomery convresion + + // for Ec::setIoMode() + IoEcAffine(default) + "0" ; infinity + "1 <x> <y>" ; affine coordinate + + IoEcProj + "4" <x> <y> <z> ; projective or jacobi coordinate + + IoEcCompY + 1-bit y prepresentation of elliptic curve + "2 <x>" ; compressed for even y + "3 <x>" ; compressed for odd y + + IoSerialize + if isMSBserialize(): // p is not full bit + size = Fp::getByteSize() + use MSB of array of x for 1-bit y for prime p where (p % 8 != 0) + [0] ; infinity + <x> ; for even y + <x>|1 ; for odd y ; |1 means set MSB of x + else: + size = Fp::getByteSize() + 1 + [0] ; infinity + 2 <x> ; for even y + 3 <x> ; for odd y +*/ +enum IoMode { + IoAuto = 0, // dec or hex according to ios_base::fmtflags + IoBin = 2, // binary number without prefix + IoDec = 10, // decimal number without prefix + IoHex = 16, // hexadecimal number without prefix + IoArray = 32, // array of Unit(fixed size) 
+ IoArrayRaw = 64, // raw array of Unit without Montgomery conversion + IoPrefix = 128, // append '0b'(bin) or '0x'(hex) + IoBinPrefix = IoBin | IoPrefix, + IoHexPrefix = IoHex | IoPrefix, + IoEcAffine = 0, // affine coordinate + IoEcCompY = 256, // 1-bit y representation of elliptic curve + IoSerialize = 512, // use MBS for 1-bit y + IoFixedSizeByteSeq = IoSerialize, // obsolete + IoEcProj = 1024, // projective or jacobi coordinate + IoSerializeHexStr = 2048 // printable hex string +}; + +namespace fp { + +const size_t UnitBitSize = sizeof(Unit) * 8; + +const size_t maxUnitSize = (MCL_MAX_BIT_SIZE + UnitBitSize - 1) / UnitBitSize; +#define MCL_MAX_UNIT_SIZE ((MCL_MAX_BIT_SIZE + MCL_UNIT_BIT_SIZE - 1) / MCL_UNIT_BIT_SIZE) + +struct FpGenerator; +struct Op; + +typedef void (*void1u)(Unit*); +typedef void (*void2u)(Unit*, const Unit*); +typedef void (*void2uI)(Unit*, const Unit*, Unit); +typedef void (*void2uIu)(Unit*, const Unit*, Unit, const Unit*); +typedef void (*void2uOp)(Unit*, const Unit*, const Op&); +typedef void (*void3u)(Unit*, const Unit*, const Unit*); +typedef void (*void4u)(Unit*, const Unit*, const Unit*, const Unit*); +typedef int (*int2u)(Unit*, const Unit*); + +typedef Unit (*u1uII)(Unit*, Unit, Unit); +typedef Unit (*u3u)(Unit*, const Unit*, const Unit*); + +struct Block { + const Unit *p; // pointer to original FpT.v_ + size_t n; + Unit v_[maxUnitSize]; +}; + +enum Mode { + FP_AUTO, + FP_GMP, + FP_GMP_MONT, + FP_LLVM, + FP_LLVM_MONT, + FP_XBYAK +}; + +enum PrimeMode { + PM_GENERIC = 0, + PM_NIST_P192, + PM_SECP256K1, + PM_NIST_P521 +}; + +enum MaskMode { + NoMask = 0, // throw if greater or equal + SmallMask = 1, // 1-bit smaller mask if greater or equal + MaskAndMod = 2 // mask and substract if greater or equal +}; + +struct Op { + /* + don't change the layout of rp and p + asm code assumes &rp + 1 == p + */ + Unit rp; + Unit p[maxUnitSize]; + mpz_class mp; + uint32_t pmod4; + mcl::SquareRoot sq; + Unit half[maxUnitSize]; // (p + 1) / 2 + Unit oneRep[maxUnitSize]; // 1(=inv R if Montgomery) + /* + for Montgomery + one = 1 + R = (1 << (N * sizeof(Unit) * 8)) % p + R2 = (R * R) % p + R3 = RR^3 + */ + Unit one[maxUnitSize]; + Unit R2[maxUnitSize]; + Unit R3[maxUnitSize]; +#ifdef MCL_USE_XBYAK + FpGenerator *fg; + mcl::Array<Unit> invTbl; +#endif + void3u fp_addA_; + void3u fp_subA_; + void2u fp_negA_; + void3u fp_mulA_; + void2u fp_sqrA_; + void3u fp2_addA_; + void3u fp2_subA_; + void2u fp2_negA_; + void3u fp2_mulA_; + void2u fp2_sqrA_; + void3u fpDbl_addA_; + void3u fpDbl_subA_; + void3u fpDbl_addPreA_; + void3u fpDbl_subPreA_; + void3u fpDbl_mulPreA_; + void2u fpDbl_sqrPreA_; + void2u fpDbl_modA_; + void3u fp2Dbl_mulPreA_; + void2u fp2Dbl_sqrPreA_; + size_t maxN; + size_t N; + size_t bitSize; + bool (*fp_isZero)(const Unit*); + void1u fp_clear; + void2u fp_copy; + void2u fp_shr1; + void3u fp_neg; + void4u fp_add; + void4u fp_sub; + void4u fp_mul; + void3u fp_sqr; + void2uOp fp_invOp; + void2uIu fp_mulUnit; // fpN1_mod + fp_mulUnitPre + + void3u fpDbl_mulPre; + void2u fpDbl_sqrPre; + int2u fp_preInv; + void2uI fp_mulUnitPre; // z[N + 1] = x[N] * y + void3u fpN1_mod; // y[N] = x[N + 1] % p[N] + + void4u fpDbl_add; + void4u fpDbl_sub; + void3u fpDbl_mod; + + u3u fp_addPre; // without modulo p + u3u fp_subPre; // without modulo p + u3u fpDbl_addPre; + u3u fpDbl_subPre; + /* + for Fp2 = F[u] / (u^2 + 1) + x = a + bu + */ + int xi_a; // xi = xi_a + u + void4u fp2_mulNF; + void2u fp2_inv; + void2u fp2_mul_xiA_; + uint32_t (*hash)(void *out, uint32_t maxOutSize, const void *msg, 
uint32_t msgSize); + + PrimeMode primeMode; + bool isFullBit; // true if bitSize % uniSize == 0 + bool isMont; // true if use Montgomery + bool isFastMod; // true if modulo is fast + + Op() + { + clear(); + } + ~Op() + { +#ifdef MCL_USE_XBYAK + destroyFpGenerator(fg); +#endif + } + void clear() + { + rp = 0; + memset(p, 0, sizeof(p)); + mp = 0; + pmod4 = 0; + sq.clear(); + // fg is not set + memset(half, 0, sizeof(half)); + memset(oneRep, 0, sizeof(oneRep)); + memset(one, 0, sizeof(one)); + memset(R2, 0, sizeof(R2)); + memset(R3, 0, sizeof(R3)); +#ifdef MCL_USE_XBYAK + invTbl.clear(); +#endif + fp_addA_ = 0; + fp_subA_ = 0; + fp_negA_ = 0; + fp_mulA_ = 0; + fp_sqrA_ = 0; + fp2_addA_ = 0; + fp2_subA_ = 0; + fp2_negA_ = 0; + fp2_mulA_ = 0; + fp2_sqrA_ = 0; + fpDbl_addA_ = 0; + fpDbl_subA_ = 0; + fpDbl_addPreA_ = 0; + fpDbl_subPreA_ = 0; + fpDbl_mulPreA_ = 0; + fpDbl_sqrPreA_ = 0; + fpDbl_modA_ = 0; + fp2Dbl_mulPreA_ = 0; + fp2Dbl_sqrPreA_ = 0; + maxN = 0; + N = 0; + bitSize = 0; + fp_isZero = 0; + fp_clear = 0; + fp_copy = 0; + fp_shr1 = 0; + fp_neg = 0; + fp_add = 0; + fp_sub = 0; + fp_mul = 0; + fp_sqr = 0; + fp_invOp = 0; + fp_mulUnit = 0; + + fpDbl_mulPre = 0; + fpDbl_sqrPre = 0; + fp_preInv = 0; + fp_mulUnitPre = 0; + fpN1_mod = 0; + + fpDbl_add = 0; + fpDbl_sub = 0; + fpDbl_mod = 0; + + fp_addPre = 0; + fp_subPre = 0; + fpDbl_addPre = 0; + fpDbl_subPre = 0; + + xi_a = 0; + fp2_mulNF = 0; + fp2_inv = 0; + fp2_mul_xiA_ = 0; + + primeMode = PM_GENERIC; + isFullBit = false; + isMont = false; + isFastMod = false; + hash = 0; + } + void fromMont(Unit* y, const Unit *x) const + { + /* + M(x, y) = xyR^-1 + y = M(x, 1) = xR^-1 + */ + fp_mul(y, x, one, p); + } + void toMont(Unit* y, const Unit *x) const + { + /* + y = M(x, R2) = xR^2 R^-1 = xR + */ + fp_mul(y, x, R2, p); + } + bool init(const mpz_class& p, size_t maxBitSize, Mode mode, size_t mclMaxBitSize = MCL_MAX_BIT_SIZE); + void initFp2(int xi_a); +#ifdef MCL_USE_XBYAK + static FpGenerator* createFpGenerator(); + static void destroyFpGenerator(FpGenerator *fg); +#endif +private: + Op(const Op&); + void operator=(const Op&); +}; + +inline const char* getIoSeparator(int ioMode) +{ + return (ioMode & (IoArray | IoArrayRaw | IoSerialize | IoSerializeHexStr)) ? 
"" : " "; +} + +inline void dump(const char *s, size_t n) +{ + for (size_t i = 0; i < n; i++) { + printf("%02x ", (uint8_t)s[i]); + } + printf("\n"); +} + +#ifndef CYBOZU_DONT_USE_STRING +int detectIoMode(int ioMode, const std::ios_base& ios); + +inline void dump(const std::string& s) +{ + dump(s.c_str(), s.size()); +} +#endif + +} } // mcl::fp diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/operator.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/operator.hpp new file mode 100644 index 000000000..e9bc506df --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/operator.hpp @@ -0,0 +1,177 @@ +#pragma once +/** + @file + @brief operator class + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <mcl/op.hpp> +#include <mcl/util.hpp> +#ifdef _MSC_VER + #ifndef MCL_FORCE_INLINE + #define MCL_FORCE_INLINE __forceinline + #endif + #pragma warning(push) + #pragma warning(disable : 4714) +#else + #ifndef MCL_FORCE_INLINE + #define MCL_FORCE_INLINE __attribute__((always_inline)) + #endif +#endif + +namespace mcl { namespace fp { + +template<class T> +struct Empty {}; + +/* + T must have add, sub, mul, inv, neg +*/ +template<class T, class E = Empty<T> > +struct Operator : public E { + template<class S> MCL_FORCE_INLINE T& operator+=(const S& rhs) { T::add(static_cast<T&>(*this), static_cast<const T&>(*this), rhs); return static_cast<T&>(*this); } + template<class S> MCL_FORCE_INLINE T& operator-=(const S& rhs) { T::sub(static_cast<T&>(*this), static_cast<const T&>(*this), rhs); return static_cast<T&>(*this); } + template<class S> friend MCL_FORCE_INLINE T operator+(const T& a, const S& b) { T c; T::add(c, a, b); return c; } + template<class S> friend MCL_FORCE_INLINE T operator-(const T& a, const S& b) { T c; T::sub(c, a, b); return c; } + template<class S> MCL_FORCE_INLINE T& operator*=(const S& rhs) { T::mul(static_cast<T&>(*this), static_cast<const T&>(*this), rhs); return static_cast<T&>(*this); } + template<class S> friend MCL_FORCE_INLINE T operator*(const T& a, const S& b) { T c; T::mul(c, a, b); return c; } + MCL_FORCE_INLINE T& operator/=(const T& rhs) { T c; T::inv(c, rhs); T::mul(static_cast<T&>(*this), static_cast<const T&>(*this), c); return static_cast<T&>(*this); } + static MCL_FORCE_INLINE void div(T& c, const T& a, const T& b) { T t; T::inv(t, b); T::mul(c, a, t); } + friend MCL_FORCE_INLINE T operator/(const T& a, const T& b) { T c; T::inv(c, b); c *= a; return c; } + MCL_FORCE_INLINE T operator-() const { T c; T::neg(c, static_cast<const T&>(*this)); return c; } + template<class tag2, size_t maxBitSize2, template<class _tag, size_t _maxBitSize> class FpT> + static void pow(T& z, const T& x, const FpT<tag2, maxBitSize2>& y) + { + fp::Block b; + y.getBlock(b); + powArray(z, x, b.p, b.n, false, false); + } + template<class tag2, size_t maxBitSize2, template<class _tag, size_t _maxBitSize> class FpT> + static void powGeneric(T& z, const T& x, const FpT<tag2, maxBitSize2>& y) + { + fp::Block b; + y.getBlock(b); + powArrayBase(z, x, b.p, b.n, false, false); + } + template<class tag2, size_t maxBitSize2, template<class _tag, size_t _maxBitSize> class FpT> + static void powCT(T& z, const T& x, const FpT<tag2, maxBitSize2>& y) + { + fp::Block b; + y.getBlock(b); + powArray(z, x, b.p, b.n, false, true); + } + static void pow(T& z, const T& x, int64_t y) + { + const uint64_t u = fp::abs_(y); +#if MCL_SIZEOF_UNIT == 8 + powArray(z, x, &u, 1, y < 0, false); +#else + uint32_t 
ua[2] = { uint32_t(u), uint32_t(u >> 32) }; + size_t un = ua[1] ? 2 : 1; + powArray(z, x, ua, un, y < 0, false); +#endif + } + static void pow(T& z, const T& x, const mpz_class& y) + { + powArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, false); + } + static void powGeneric(T& z, const T& x, const mpz_class& y) + { + powArrayBase(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, false); + } + static void powCT(T& z, const T& x, const mpz_class& y) + { + powArray(z, x, gmp::getUnit(y), gmp::getUnitSize(y), y < 0, true); + } + static void setPowArrayGLV(void f(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime)) + { + powArrayGLV = f; + } +private: + static void (*powArrayGLV)(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime); + static void powArray(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime) + { + if (powArrayGLV && (constTime || yn > 1)) { + powArrayGLV(z, x, y, yn, isNegative, constTime); + return; + } + powArrayBase(z, x, y, yn, isNegative, constTime); + } + static void powArrayBase(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime) + { + T tmp; + const T *px = &x; + if (&z == &x) { + tmp = x; + px = &tmp; + } + z = 1; + fp::powGeneric(z, *px, y, yn, T::mul, T::sqr, (void (*)(T&, const T&))0, constTime ? T::BaseFp::getBitSize() : 0); + if (isNegative) { + T::inv(z, z); + } + } +}; + +template<class T, class E> +void (*Operator<T, E>::powArrayGLV)(T& z, const T& x, const Unit *y, size_t yn, bool isNegative, bool constTime); + +/* + T must have save and load +*/ +template<class T, class E = Empty<T> > +struct Serializable : public E { + void setStr(bool *pb, const char *str, int ioMode = 0) + { + size_t len = strlen(str); + size_t n = deserialize(str, len, ioMode); + *pb = n > 0 && n == len; + } + // return strlen(buf) if success else 0 + size_t getStr(char *buf, size_t maxBufSize, int ioMode = 0) const + { + size_t n = serialize(buf, maxBufSize, ioMode); + if (n == 0 || n == maxBufSize - 1) return 0; + buf[n] = '\0'; + return n; + } +#ifndef CYBOZU_DONT_USE_STRING + void setStr(const std::string& str, int ioMode = 0) + { + cybozu::StringInputStream is(str); + static_cast<T&>(*this).load(is, ioMode); + } + void getStr(std::string& str, int ioMode = 0) const + { + str.clear(); + cybozu::StringOutputStream os(str); + static_cast<const T&>(*this).save(os, ioMode); + } + std::string getStr(int ioMode = 0) const + { + std::string str; + getStr(str, ioMode); + return str; + } +#endif + // return written bytes + size_t serialize(void *buf, size_t maxBufSize, int ioMode = IoSerialize) const + { + cybozu::MemoryOutputStream os(buf, maxBufSize); + bool b; + static_cast<const T&>(*this).save(&b, os, ioMode); + return b ? os.getPos() : 0; + } + // return read bytes + size_t deserialize(const void *buf, size_t bufSize, int ioMode = IoSerialize) + { + cybozu::MemoryInputStream is(buf, bufSize); + bool b; + static_cast<T&>(*this).load(&b, is, ioMode); + return b ? 
is.getPos() : 0; + } +}; + +} } // mcl::fp + diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/paillier.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/paillier.hpp new file mode 100644 index 000000000..03e44cb16 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/paillier.hpp @@ -0,0 +1,84 @@ +#pragma once +/** + @file + @brief paillier encryption + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <mcl/gmp_util.hpp> + +namespace mcl { namespace paillier { + +class PublicKey { + size_t primeBitSize; + mpz_class g; + mpz_class n; + mpz_class n2; +public: + PublicKey() : primeBitSize(0) {} + void init(size_t _primeBitSize, const mpz_class& _n) + { + primeBitSize = _primeBitSize; + n = _n; + g = 1 + _n; + n2 = _n * _n; + } + void enc(mpz_class& c, const mpz_class& m, mcl::fp::RandGen rg = mcl::fp::RandGen()) const + { + if (rg.isZero()) rg = mcl::fp::RandGen::get(); + if (primeBitSize == 0) throw cybozu::Exception("paillier:PublicKey:not init"); + mpz_class r; + mcl::gmp::getRand(r, primeBitSize, rg); + mpz_class a, b; + mcl::gmp::powMod(a, g, m, n2); + mcl::gmp::powMod(b, r, n, n2); + c = (a * b) % n2; + } + /* + additive homomorphic encryption + cz = cx + cy + */ + void add(mpz_class& cz, mpz_class& cx, mpz_class& cy) const + { + cz = (cx * cy) % n2; + } +}; + +class SecretKey { + size_t primeBitSize; + mpz_class n; + mpz_class n2; + mpz_class lambda; + mpz_class invLambda; +public: + SecretKey() : primeBitSize(0) {} + /* + the size of prime is half of bitSize + */ + void init(size_t bitSize, mcl::fp::RandGen rg = mcl::fp::RandGen()) + { + if (rg.isZero()) rg = mcl::fp::RandGen::get(); + primeBitSize = bitSize / 2; + mpz_class p, q; + mcl::gmp::getRandPrime(p, primeBitSize, rg); + mcl::gmp::getRandPrime(q, primeBitSize, rg); + lambda = (p - 1) * (q - 1); + n = p * q; + n2 = n * n; + mcl::gmp::invMod(invLambda, lambda, n); + } + void getPublicKey(PublicKey& pub) const + { + pub.init(primeBitSize, n); + } + void dec(mpz_class& m, const mpz_class& c) const + { + mpz_class L; + mcl::gmp::powMod(L, c, lambda, n2); + L = ((L - 1) / n) % n; + m = (L * invLambda) % n; + } +}; + +} } // mcl::paillier diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/randgen.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/randgen.hpp new file mode 100644 index 000000000..4bfb30b03 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/randgen.hpp @@ -0,0 +1,126 @@ +#pragma once +/** + @file + @brief definition of Op + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#ifdef MCL_DONT_USE_CSPRNG + +// nothing + +#elif defined(MCL_USE_WEB_CRYPTO_API) +#include <emscripten.h> + +namespace mcl { +struct RandomGeneratorJS { + void read(void *buf, size_t bufSize) + { + // use crypto.getRandomValues + EM_ASM({Module.cryptoGetRandomValues($0, $1)}, buf, bufSize); + } +}; +} // mcl + +#else +#include <cybozu/random_generator.hpp> +#if 0 // #if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 +#include <random> +#endif +#endif +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4521) +#endif +namespace mcl { namespace fp { + +namespace local { + +template<class RG> +void readWrapper(void *self, void *buf, uint32_t bufSize) +{ + reinterpret_cast<RG*>(self)->read((uint8_t*)buf, bufSize); +} + +#if 0 // #if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 +template<> +inline void 
readWrapper<std::random_device>(void *self, void *buf, uint32_t bufSize) +{ + std::random_device& rg = *reinterpret_cast<std::random_device*>(self); + uint8_t *p = reinterpret_cast<uint8_t*>(buf); + uint32_t v; + while (bufSize >= 4) { + v = rg(); + memcpy(p, &v, 4); + p += 4; + bufSize -= 4; + } + if (bufSize > 0) { + v = rg(); + memcpy(p, &v, bufSize); + } +} +#endif +} // local +/* + wrapper of cryptographically secure pseudo random number generator +*/ +class RandGen { + typedef void (*readFuncType)(void *self, void *buf, uint32_t bufSize); + void *self_; + readFuncType readFunc_; +public: + RandGen() : self_(0), readFunc_(0) {} + RandGen(void *self, readFuncType readFunc) : self_(self) , readFunc_(readFunc) {} + RandGen(const RandGen& rhs) : self_(rhs.self_), readFunc_(rhs.readFunc_) {} + RandGen(RandGen& rhs) : self_(rhs.self_), readFunc_(rhs.readFunc_) {} + RandGen& operator=(const RandGen& rhs) + { + self_ = rhs.self_; + readFunc_ = rhs.readFunc_; + return *this; + } + template<class RG> + RandGen(RG& rg) + : self_(reinterpret_cast<void*>(&rg)) + , readFunc_(local::readWrapper<RG>) + { + } + void read(void *out, size_t byteSize) + { + readFunc_(self_, out, static_cast<uint32_t>(byteSize)); + } +#ifdef MCL_DONT_USE_CSPRNG + bool isZero() const { return false; } /* return false to avoid copying default rg */ +#else + bool isZero() const { return self_ == 0 && readFunc_ == 0; } +#endif + static RandGen& get() + { +#ifdef MCL_DONT_USE_CSPRNG + static RandGen wrg; +#elif defined(MCL_USE_WEB_CRYPTO_API) + static mcl::RandomGeneratorJS rg; + static RandGen wrg(rg); +#else + static cybozu::RandomGenerator rg; + static RandGen wrg(rg); +#endif + return wrg; + } + /* + rg must be thread safe + rg.read(void *buf, size_t bufSize); + */ + static void setRandGen(const RandGen& rg) + { + get() = rg; + } +}; + +} } // mcl::fp + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/she.h b/vendor/github.com/dexon-foundation/mcl/include/mcl/she.h new file mode 100644 index 000000000..60b399c65 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/she.h @@ -0,0 +1,270 @@ +#pragma once +/** + @file + @brief C api of somewhat homomorphic encryption with one-time multiplication, based on prime-order pairings + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <mcl/bn.h> + +#ifdef _MSC_VER +#ifdef MCLSHE_DLL_EXPORT +#define MCLSHE_DLL_API __declspec(dllexport) +#else +#define MCLSHE_DLL_API __declspec(dllimport) +#ifndef MCLSHE_NO_AUTOLINK + #if MCLBN_FP_UNIT_SIZE == 4 + #pragma comment(lib, "mclshe256.lib") + #elif MCLBN_FP_UNIT_SIZE == 6 + #pragma comment(lib, "mclshe384.lib") + #else + #pragma comment(lib, "mclshe512.lib") + #endif +#endif +#endif +#else +#ifdef __EMSCRIPTEN__ + #define MCLSHE_DLL_API __attribute__((used)) +#elif defined(__wasm__) + #define MCLSHE_DLL_API __attribute__((visibility("default"))) +#else + #define MCLSHE_DLL_API +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + mclBnFr x; + mclBnFr y; +} sheSecretKey; + +typedef struct { + mclBnG1 xP; + mclBnG2 yQ; +} shePublicKey; + +struct shePrecomputedPublicKey; + +typedef struct { + mclBnG1 S; + mclBnG1 T; +} sheCipherTextG1; + +typedef struct { + mclBnG2 S; + mclBnG2 T; +} sheCipherTextG2; + +typedef struct { + mclBnGT g[4]; +} sheCipherTextGT; + +typedef struct { + mclBnFr d[4]; +} sheZkpBin; + +typedef struct { + mclBnFr d[4]; +} sheZkpEq; + 
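The key, ciphertext, and Zkp structs defined above are operated on by the C functions declared in the remainder of this header (sheInit, sheSecretKeySetByCSPRNG, sheGetPublicKey, sheSetRangeForDLP, the enc/dec/add/mul families). As an illustration only — not part of the vendored patch — here is a minimal usage sketch, assuming the MCL_BN254 curve constant from mcl/bn.h and a hypothetical she_demo() entry point; it exercises the additive homomorphism in G1 and the one-time multiplication into GT.

#include <stdio.h>
#include <mcl/she.h>

/* illustrative sketch only: she_demo is a hypothetical name,
   MCL_BN254 is assumed to be provided by mcl/bn.h */
int she_demo(void)
{
	sheSecretKey sec;
	shePublicKey pub;
	sheCipherTextG1 c1, c2;
	sheCipherTextG2 d;
	sheCipherTextGT ct;
	mclInt m = 0;
	if (sheInit(MCL_BN254, MCLBN_COMPILED_TIME_VAR) != 0) return 1;
	if (sheSetRangeForDLP(1024) != 0) return 1;       /* DLP table used when decoding */
	if (sheSecretKeySetByCSPRNG(&sec) != 0) return 1;
	sheGetPublicKey(&pub, &sec);

	/* additive homomorphism in G1: Enc(3) + Enc(4) decrypts to 7 */
	if (sheEncG1(&c1, &pub, 3) != 0) return 1;
	if (sheEncG1(&c2, &pub, 4) != 0) return 1;
	if (sheAddG1(&c1, &c1, &c2) != 0) return 1;
	if (sheDecG1(&m, &sec, &c1) != 0) return 1;
	printf("3 + 4 = %lld\n", (long long)m);

	/* one-time multiplication: Enc_G1(7) * Enc_G2(5) decrypts to 35 in GT */
	if (sheEncG2(&d, &pub, 5) != 0) return 1;
	if (sheMul(&ct, &c1, &d) != 0) return 1;
	if (sheDecGT(&m, &sec, &ct) != 0) return 1;
	printf("(3 + 4) * 5 = %lld\n", (long long)m);
	return 0;
}

The explicit sheSetRangeForDLP(1024) call bounds the decodable message range to roughly |m| <= hashSize * tryNum, matching the decoding note in she.hpp further below.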
+typedef struct { + mclBnFr d[7]; +} sheZkpBinEq; +/* + initialize this library + call this once before using the other functions + @param curve [in] enum value defined in mcl/bn.h + @param compiledTimeVar [in] specify MCLBN_COMPILED_TIME_VAR, + which macro is used to make sure that the values + are the same when the library is built and used + @return 0 if success + @note sheInit() is thread safe and serialized if it is called simultaneously + but don't call it while using other functions. +*/ +MCLSHE_DLL_API int sheInit(int curve, int compiledTimeVar); + +// return written byte size if success else 0 +MCLSHE_DLL_API mclSize sheSecretKeySerialize(void *buf, mclSize maxBufSize, const sheSecretKey *sec); +MCLSHE_DLL_API mclSize shePublicKeySerialize(void *buf, mclSize maxBufSize, const shePublicKey *pub); +MCLSHE_DLL_API mclSize sheCipherTextG1Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG1 *c); +MCLSHE_DLL_API mclSize sheCipherTextG2Serialize(void *buf, mclSize maxBufSize, const sheCipherTextG2 *c); +MCLSHE_DLL_API mclSize sheCipherTextGTSerialize(void *buf, mclSize maxBufSize, const sheCipherTextGT *c); +MCLSHE_DLL_API mclSize sheZkpBinSerialize(void *buf, mclSize maxBufSize, const sheZkpBin *zkp); +MCLSHE_DLL_API mclSize sheZkpEqSerialize(void *buf, mclSize maxBufSize, const sheZkpEq *zkp); +MCLSHE_DLL_API mclSize sheZkpBinEqSerialize(void *buf, mclSize maxBufSize, const sheZkpBinEq *zkp); + +// return read byte size if sucess else 0 +MCLSHE_DLL_API mclSize sheSecretKeyDeserialize(sheSecretKey* sec, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize shePublicKeyDeserialize(shePublicKey* pub, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheCipherTextG1Deserialize(sheCipherTextG1* c, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheCipherTextG2Deserialize(sheCipherTextG2* c, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheCipherTextGTDeserialize(sheCipherTextGT* c, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheZkpBinDeserialize(sheZkpBin* zkp, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheZkpEqDeserialize(sheZkpEq* zkp, const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheZkpBinEqDeserialize(sheZkpBinEq* zkp, const void *buf, mclSize bufSize); + +/* + set secretKey if system has /dev/urandom or CryptGenRandom + return 0 if success +*/ +MCLSHE_DLL_API int sheSecretKeySetByCSPRNG(sheSecretKey *sec); + +MCLSHE_DLL_API void sheGetPublicKey(shePublicKey *pub, const sheSecretKey *sec); + +/* + make table to decode DLP + return 0 if success +*/ +MCLSHE_DLL_API int sheSetRangeForDLP(mclSize hashSize); +MCLSHE_DLL_API int sheSetRangeForG1DLP(mclSize hashSize); +MCLSHE_DLL_API int sheSetRangeForG2DLP(mclSize hashSize); +MCLSHE_DLL_API int sheSetRangeForGTDLP(mclSize hashSize); + +/* + set tryNum to decode DLP +*/ +MCLSHE_DLL_API void sheSetTryNum(mclSize tryNum); + +/* + decode G1 via GT if use != 0 + @note faster if tryNum >= 300 +*/ +MCLSHE_DLL_API void sheUseDecG1ViaGT(int use); +/* + decode G2 via GT if use != 0 + @note faster if tryNum >= 100 +*/ +MCLSHE_DLL_API void sheUseDecG2ViaGT(int use); +/* + load table for DLP + return read size if success else 0 +*/ +MCLSHE_DLL_API mclSize sheLoadTableForG1DLP(const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheLoadTableForG2DLP(const void *buf, mclSize bufSize); +MCLSHE_DLL_API mclSize sheLoadTableForGTDLP(const void *buf, mclSize bufSize); + +/* + save table for DLP + return written size if success else 0 +*/ +MCLSHE_DLL_API mclSize 
sheSaveTableForG1DLP(void *buf, mclSize maxBufSize); +MCLSHE_DLL_API mclSize sheSaveTableForG2DLP(void *buf, mclSize maxBufSize); +MCLSHE_DLL_API mclSize sheSaveTableForGTDLP(void *buf, mclSize maxBufSize); + +// return 0 if success +MCLSHE_DLL_API int sheEncG1(sheCipherTextG1 *c, const shePublicKey *pub, mclInt m); +MCLSHE_DLL_API int sheEncG2(sheCipherTextG2 *c, const shePublicKey *pub, mclInt m); +MCLSHE_DLL_API int sheEncGT(sheCipherTextGT *c, const shePublicKey *pub, mclInt m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncG1(sheCipherTextG1 *c, const shePrecomputedPublicKey *ppub, mclInt m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncG2(sheCipherTextG2 *c, const shePrecomputedPublicKey *ppub, mclInt m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncGT(sheCipherTextGT *c, const shePrecomputedPublicKey *ppub, mclInt m); + +/* + m must be 0 or 1 +*/ +MCLSHE_DLL_API int sheEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePublicKey *pub, int m); +MCLSHE_DLL_API int sheEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePublicKey *pub, int m); +MCLSHE_DLL_API int sheEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePublicKey *pub, int m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinG1(sheCipherTextG1 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *ppub, int m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinG2(sheCipherTextG2 *c, sheZkpBin *zkp, const shePrecomputedPublicKey *ppub, int m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpBinEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpBinEq *zkp, const shePrecomputedPublicKey *ppub, int m); + +/* + arbitary m +*/ +MCLSHE_DLL_API int sheEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePublicKey *pub, mclInt m); +MCLSHE_DLL_API int shePrecomputedPublicKeyEncWithZkpEq(sheCipherTextG1 *c1, sheCipherTextG2 *c2, sheZkpEq *zkp, const shePrecomputedPublicKey *ppub, mclInt m); + +/* + decode c and set m + return 0 if success +*/ +MCLSHE_DLL_API int sheDecG1(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c); +MCLSHE_DLL_API int sheDecG2(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c); +MCLSHE_DLL_API int sheDecGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextGT *c); +/* + verify zkp + return 1 if valid +*/ +MCLSHE_DLL_API int sheVerifyZkpBinG1(const shePublicKey *pub, const sheCipherTextG1 *c, const sheZkpBin *zkp); +MCLSHE_DLL_API int sheVerifyZkpBinG2(const shePublicKey *pub, const sheCipherTextG2 *c, const sheZkpBin *zkp); +MCLSHE_DLL_API int sheVerifyZkpEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp); +MCLSHE_DLL_API int sheVerifyZkpBinEq(const shePublicKey *pub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp); +MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinG1(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c, const sheZkpBin *zkp); +MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinG2(const shePrecomputedPublicKey *ppub, const sheCipherTextG2 *c, const sheZkpBin *zkp); +MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpEq *zkp); +MCLSHE_DLL_API int shePrecomputedPublicKeyVerifyZkpBinEq(const shePrecomputedPublicKey *ppub, const sheCipherTextG1 *c1, const sheCipherTextG2 *c2, const sheZkpBinEq *zkp); +/* + decode c via GT and set m + return 0 if success +*/ +MCLSHE_DLL_API int 
sheDecG1ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG1 *c); +MCLSHE_DLL_API int sheDecG2ViaGT(mclInt *m, const sheSecretKey *sec, const sheCipherTextG2 *c); + +/* + return 1 if dec(c) == 0 +*/ +MCLSHE_DLL_API int sheIsZeroG1(const sheSecretKey *sec, const sheCipherTextG1 *c); +MCLSHE_DLL_API int sheIsZeroG2(const sheSecretKey *sec, const sheCipherTextG2 *c); +MCLSHE_DLL_API int sheIsZeroGT(const sheSecretKey *sec, const sheCipherTextGT *c); + +// return 0 if success +// y = -x +MCLSHE_DLL_API int sheNegG1(sheCipherTextG1 *y, const sheCipherTextG1 *x); +MCLSHE_DLL_API int sheNegG2(sheCipherTextG2 *y, const sheCipherTextG2 *x); +MCLSHE_DLL_API int sheNegGT(sheCipherTextGT *y, const sheCipherTextGT *x); + +// return 0 if success +// z = x + y +MCLSHE_DLL_API int sheAddG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y); +MCLSHE_DLL_API int sheAddG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y); +MCLSHE_DLL_API int sheAddGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y); + +// return 0 if success +// z = x - y +MCLSHE_DLL_API int sheSubG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, const sheCipherTextG1 *y); +MCLSHE_DLL_API int sheSubG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, const sheCipherTextG2 *y); +MCLSHE_DLL_API int sheSubGT(sheCipherTextGT *z, const sheCipherTextGT *x, const sheCipherTextGT *y); + +// return 0 if success +// z = x * y +MCLSHE_DLL_API int sheMulG1(sheCipherTextG1 *z, const sheCipherTextG1 *x, mclInt y); +MCLSHE_DLL_API int sheMulG2(sheCipherTextG2 *z, const sheCipherTextG2 *x, mclInt y); +MCLSHE_DLL_API int sheMulGT(sheCipherTextGT *z, const sheCipherTextGT *x, mclInt y); + +// return 0 if success +// z = x * y +MCLSHE_DLL_API int sheMul(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y); +/* + sheMul(z, x, y) = sheMulML(z, x, y) + sheFinalExpGT(z) + @note + Mul(x1, y1) + ... + Mul(xn, yn) = finalExp(MulML(x1, y1) + ... 
+ MulML(xn, yn)) +*/ +MCLSHE_DLL_API int sheMulML(sheCipherTextGT *z, const sheCipherTextG1 *x, const sheCipherTextG2 *y); +MCLSHE_DLL_API int sheFinalExpGT(sheCipherTextGT *y, const sheCipherTextGT *x); + +// return 0 if success +// rerandomize(c) +MCLSHE_DLL_API int sheReRandG1(sheCipherTextG1 *c, const shePublicKey *pub); +MCLSHE_DLL_API int sheReRandG2(sheCipherTextG2 *c, const shePublicKey *pub); +MCLSHE_DLL_API int sheReRandGT(sheCipherTextGT *c, const shePublicKey *pub); + +// return 0 if success +// y = convert(x) +MCLSHE_DLL_API int sheConvertG1(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG1 *x); +MCLSHE_DLL_API int sheConvertG2(sheCipherTextGT *y, const shePublicKey *pub, const sheCipherTextG2 *x); + +// return nonzero if success +MCLSHE_DLL_API shePrecomputedPublicKey *shePrecomputedPublicKeyCreate(); +// call this function to avoid memory leak +MCLSHE_DLL_API void shePrecomputedPublicKeyDestroy(shePrecomputedPublicKey *ppub); +// return 0 if success +MCLSHE_DLL_API int shePrecomputedPublicKeyInit(shePrecomputedPublicKey *ppub, const shePublicKey *pub); + +#ifdef __cplusplus +} +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/she.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/she.hpp new file mode 100644 index 000000000..3ce361454 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/she.hpp @@ -0,0 +1,1939 @@ +#pragma once +/** + @file + @brief somewhat homomorphic encryption with one-time multiplication, based on prime-order pairings + @author MITSUNARI Shigeo(@herumi) + see https://github.com/herumi/mcl/blob/master/misc/she/she.pdf + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <cmath> +#include <vector> +#include <iosfwd> +#ifndef MCLBN_FP_UNIT_SIZE + #define MCLBN_FP_UNIT_SIZE 4 +#endif +#if MCLBN_FP_UNIT_SIZE == 4 +#include <mcl/bn256.hpp> +#elif MCLBN_FP_UNIT_SIZE == 6 +#include <mcl/bn384.hpp> +#elif MCLBN_FP_UNIT_SIZE == 8 +#include <mcl/bn512.hpp> +#else + #error "MCLBN_FP_UNIT_SIZE must be 4, 6, or 8" +#endif + +#include <mcl/window_method.hpp> +#include <cybozu/endian.hpp> +#include <cybozu/serializer.hpp> + +namespace mcl { namespace she { + +using namespace mcl::bn; + +namespace local { + +#ifndef MCLSHE_WIN_SIZE + #define MCLSHE_WIN_SIZE 10 +#endif +static const size_t winSize = MCLSHE_WIN_SIZE; +static const size_t defaultTryNum = 2048; + +struct KeyCount { + uint32_t key; + int32_t count; // power + bool operator<(const KeyCount& rhs) const + { + return key < rhs.key; + } + bool isSame(const KeyCount& rhs) const + { + return key == rhs.key && count == rhs.count; + } +}; + +template<class G, bool = true> +struct InterfaceForHashTable : G { + static G& castG(InterfaceForHashTable& x) { return static_cast<G&>(x); } + static const G& castG(const InterfaceForHashTable& x) { return static_cast<const G&>(x); } + void clear() { clear(castG(*this)); } + void normalize() { normalize(castG(*this)); } + static bool isOdd(const G& P) { return P.y.isOdd(); } + static bool isZero(const G& P) { return P.isZero(); } + static bool isSameX(const G& P, const G& Q) { return P.x == Q.x; } + static uint32_t getHash(const G& P) { return uint32_t(*P.x.getUnit()); } + static void clear(G& P) { P.clear(); } + static void normalize(G& P) { P.normalize(); } + static void dbl(G& Q, const G& P) { G::dbl(Q, P); } + static void neg(G& Q, const G& P) { G::neg(Q, P); } + static void add(G& R, const G& P, const G& Q) { G::add(R, P, Q); } + template<class INT> + static void mul(G& 
Q, const G& P, const INT& x) { G::mul(Q, P, x); } +}; + +/* + treat Fp12 as EC + unitary inverse of (a, b) = (a, -b) + then b.a.a or -b.a.a is odd +*/ +template<class G> +struct InterfaceForHashTable<G, false> : G { + static G& castG(InterfaceForHashTable& x) { return static_cast<G&>(x); } + static const G& castG(const InterfaceForHashTable& x) { return static_cast<const G&>(x); } + void clear() { clear(castG(*this)); } + void normalize() { normalize(castG(*this)); } + static bool isOdd(const G& x) { return x.b.a.a.isOdd(); } + static bool isZero(const G& x) { return x.isOne(); } + static bool isSameX(const G& x, const G& Q) { return x.a == Q.a; } + static uint32_t getHash(const G& x) { return uint32_t(*x.getFp0()->getUnit()); } + static void clear(G& x) { x = 1; } + static void normalize(G&) { } + static void dbl(G& y, const G& x) { G::sqr(y, x); } + static void neg(G& Q, const G& P) { G::unitaryInv(Q, P); } + static void add(G& z, const G& x, const G& y) { G::mul(z, x, y); } + template<class INT> + static void mul(G& z, const G& x, const INT& y) { G::pow(z, x, y); } +}; + +template<class G> +char GtoChar(); +template<>char GtoChar<bn::G1>() { return '1'; } +template<>char GtoChar<bn::G2>() { return '2'; } +template<>char GtoChar<bn::GT>() { return 'T'; } + +/* + HashTable<EC, true> or HashTable<Fp12, false> +*/ +template<class G, bool isEC = true> +class HashTable { + typedef InterfaceForHashTable<G, isEC> I; + typedef std::vector<KeyCount> KeyCountVec; + KeyCountVec kcv_; + G P_; + mcl::fp::WindowMethod<I> wm_; + G nextP_; + G nextNegP_; + size_t tryNum_; + void setWindowMethod() + { + const size_t bitSize = G::BaseFp::BaseFp::getBitSize(); + wm_.init(static_cast<const I&>(P_), bitSize, local::winSize); + } +public: + HashTable() : tryNum_(local::defaultTryNum) {} + bool operator==(const HashTable& rhs) const + { + if (kcv_.size() != rhs.kcv_.size()) return false; + for (size_t i = 0; i < kcv_.size(); i++) { + if (!kcv_[i].isSame(rhs.kcv_[i])) return false; + } + return P_ == rhs.P_ && nextP_ == rhs.nextP_; + } + bool operator!=(const HashTable& rhs) const { return !operator==(rhs); } + /* + compute log_P(xP) for |x| <= hashSize * tryNum + */ + void init(const G& P, size_t hashSize, size_t tryNum = local::defaultTryNum) + { + if (hashSize == 0) { + kcv_.clear(); + return; + } + if (hashSize >= 0x80000000u) throw cybozu::Exception("HashTable:init:hashSize is too large"); + P_ = P; + tryNum_ = tryNum; + kcv_.resize(hashSize); + G xP; + I::clear(xP); + for (int i = 1; i <= (int)kcv_.size(); i++) { + I::add(xP, xP, P_); + I::normalize(xP); + kcv_[i - 1].key = I::getHash(xP); + kcv_[i - 1].count = I::isOdd(xP) ? 
i : -i; + } + nextP_ = xP; + I::dbl(nextP_, nextP_); + I::add(nextP_, nextP_, P_); // nextP = (hasSize * 2 + 1)P + I::neg(nextNegP_, nextP_); // nextNegP = -nextP + /* + ascending order of abs(count) for same key + */ + std::stable_sort(kcv_.begin(), kcv_.end()); + setWindowMethod(); + } + void setTryNum(size_t tryNum) + { + this->tryNum_ = tryNum; + } + /* + log_P(xP) + find range which has same hash of xP in kcv_, + and detect it + */ + int basicLog(G xP, bool *ok = 0) const + { + if (ok) *ok = true; + if (I::isZero(xP)) return 0; + typedef KeyCountVec::const_iterator Iter; + KeyCount kc; + I::normalize(xP); + kc.key = I::getHash(xP); + kc.count = 0; + std::pair<Iter, Iter> p = std::equal_range(kcv_.begin(), kcv_.end(), kc); + G Q; + I::clear(Q); + int prev = 0; + /* + check range which has same hash + */ + while (p.first != p.second) { + int count = p.first->count; + int abs_c = std::abs(count); + assert(abs_c >= prev); // assume ascending order + bool neg = count < 0; + G T; +// I::mul(T, P, abs_c - prev); + mulByWindowMethod(T, abs_c - prev); + I::add(Q, Q, T); + I::normalize(Q); + if (I::isSameX(Q, xP)) { + bool QisOdd = I::isOdd(Q); + bool xPisOdd = I::isOdd(xP); + if (QisOdd ^ xPisOdd ^ neg) return -count; + return count; + } + prev = abs_c; + ++p.first; + } + if (ok) { + *ok = false; + return 0; + } + throw cybozu::Exception("HashTable:basicLog:not found"); + } + /* + compute log_P(xP) + call basicLog at most 2 * tryNum + */ + int64_t log(const G& xP) const + { + bool ok; + int c = basicLog(xP, &ok); + if (ok) { + return c; + } + G posP = xP, negP = xP; + int64_t posCenter = 0; + int64_t negCenter = 0; + int64_t next = (int64_t)kcv_.size() * 2 + 1; + for (size_t i = 1; i < tryNum_; i++) { + I::add(posP, posP, nextNegP_); + posCenter += next; + c = basicLog(posP, &ok); + if (ok) { + return posCenter + c; + } + I::add(negP, negP, nextP_); + negCenter -= next; + c = basicLog(negP, &ok); + if (ok) { + return negCenter + c; + } + } + throw cybozu::Exception("HashTable:log:not found"); + } + /* + remark + tryNum is not saved. 
+ */ + template<class OutputStream> + void save(OutputStream& os) const + { + cybozu::save(os, BN::param.cp.curveType); + cybozu::writeChar(os, GtoChar<G>()); + cybozu::save(os, kcv_.size()); + cybozu::write(os, &kcv_[0], sizeof(kcv_[0]) * kcv_.size()); + P_.save(os); + } + size_t save(void *buf, size_t maxBufSize) const + { + cybozu::MemoryOutputStream os(buf, maxBufSize); + save(os); + return os.getPos(); + } + /* + remark + tryNum is not set + */ + template<class InputStream> + void load(InputStream& is) + { + int curveType; + cybozu::load(curveType, is); + if (curveType != BN::param.cp.curveType) throw cybozu::Exception("HashTable:bad curveType") << curveType; + char c = 0; + if (!cybozu::readChar(&c, is) || c != GtoChar<G>()) throw cybozu::Exception("HashTable:bad c") << (int)c; + size_t kcvSize; + cybozu::load(kcvSize, is); + kcv_.resize(kcvSize); + cybozu::read(&kcv_[0], sizeof(kcv_[0]) * kcvSize, is); + P_.load(is); + I::mul(nextP_, P_, (kcvSize * 2) + 1); + I::neg(nextNegP_, nextP_); + setWindowMethod(); + } + size_t load(const void *buf, size_t bufSize) + { + cybozu::MemoryInputStream is(buf, bufSize); + load(is); + return is.getPos(); + } + const mcl::fp::WindowMethod<I>& getWM() const { return wm_; } + /* + mul(x, P, y); + */ + template<class T> + void mulByWindowMethod(G& x, const T& y) const + { + wm_.mul(static_cast<I&>(x), y); + } +}; + +template<class G> +int log(const G& P, const G& xP) +{ + if (xP.isZero()) return 0; + if (xP == P) return 1; + G negT; + G::neg(negT, P); + if (xP == negT) return -1; + G T = P; + for (int i = 2; i < 100; i++) { + T += P; + if (xP == T) return i; + G::neg(negT, T); + if (xP == negT) return -i; + } + throw cybozu::Exception("she:log:not found"); +} + +} // mcl::she::local + +template<size_t dummyInpl = 0> +struct SHET { + class SecretKey; + class PublicKey; + class PrecomputedPublicKey; + // additive HE + class CipherTextA; // = CipherTextG1 + CipherTextG2 + class CipherTextGT; // multiplicative HE + class CipherText; // CipherTextA + CipherTextGT + + static G1 P_; + static G2 Q_; + static GT ePQ_; // e(P, Q) + static std::vector<Fp6> Qcoeff_; + static local::HashTable<G1> PhashTbl_; + static local::HashTable<G2> QhashTbl_; + static mcl::fp::WindowMethod<G2> Qwm_; + typedef local::InterfaceForHashTable<GT, false> GTasEC; + static local::HashTable<GT, false> ePQhashTbl_; + static bool useDecG1ViaGT_; + static bool useDecG2ViaGT_; + static bool isG1only_; +private: + template<class G> + class CipherTextAT : public fp::Serializable<CipherTextAT<G> > { + G S_, T_; + friend class SecretKey; + friend class PublicKey; + friend class PrecomputedPublicKey; + friend class CipherTextA; + friend class CipherTextGT; + bool isZero(const Fr& x) const + { + G xT; + G::mul(xT, T_, x); + return S_ == xT; + } + public: + const G& getS() const { return S_; } + const G& getT() const { return T_; } + void clear() + { + S_.clear(); + T_.clear(); + } + static void add(CipherTextAT& z, const CipherTextAT& x, const CipherTextAT& y) + { + /* + (S, T) + (S', T') = (S + S', T + T') + */ + G::add(z.S_, x.S_, y.S_); + G::add(z.T_, x.T_, y.T_); + } + static void sub(CipherTextAT& z, const CipherTextAT& x, const CipherTextAT& y) + { + /* + (S, T) - (S', T') = (S - S', T - T') + */ + G::sub(z.S_, x.S_, y.S_); + G::sub(z.T_, x.T_, y.T_); + } + // INT = int64_t or Fr + template<class INT> + static void mul(CipherTextAT& z, const CipherTextAT& x, const INT& y) + { + G::mul(z.S_, x.S_, y); + G::mul(z.T_, x.T_, y); + } + static void neg(CipherTextAT& y, const CipherTextAT& x) + { 
+ G::neg(y.S_, x.S_); + G::neg(y.T_, x.T_); + } + void add(const CipherTextAT& c) { add(*this, *this, c); } + void sub(const CipherTextAT& c) { sub(*this, *this, c); } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + S_.load(pb, is, ioMode); if (!*pb) return; + T_.load(pb, is, ioMode); + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + S_.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + T_.save(pb, os, ioMode); + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextA:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextA:save"); + } + friend std::istream& operator>>(std::istream& is, CipherTextAT& self) + { + self.load(is, fp::detectIoMode(G::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const CipherTextAT& self) + { + self.save(os, fp::detectIoMode(G::getIoMode(), os)); + return os; + } + bool operator==(const CipherTextAT& rhs) const + { + return S_ == rhs.S_ && T_ == rhs.T_; + } + bool operator!=(const CipherTextAT& rhs) const { return !operator==(rhs); } + }; + /* + g1 = millerLoop(P1, Q) + g2 = millerLoop(P2, Q) + */ + static void doubleMillerLoop(GT& g1, GT& g2, const G1& P1, const G1& P2, const G2& Q) + { +#if 1 + std::vector<Fp6> Qcoeff; + precomputeG2(Qcoeff, Q); + precomputedMillerLoop(g1, P1, Qcoeff); + precomputedMillerLoop(g2, P2, Qcoeff); +#else + millerLoop(g1, P1, Q); + millerLoop(g2, P2, Q); +#endif + } + static void finalExp4(GT out[4], const GT in[4]) + { + for (int i = 0; i < 4; i++) { + finalExp(out[i], in[i]); + } + } + static void tensorProductML(GT g[4], const G1& S1, const G1& T1, const G2& S2, const G2& T2) + { + /* + (S1, T1) x (S2, T2) = (ML(S1, S2), ML(S1, T2), ML(T1, S2), ML(T1, T2)) + */ + doubleMillerLoop(g[0], g[2], S1, T1, S2); + doubleMillerLoop(g[1], g[3], S1, T1, T2); + } + static void tensorProduct(GT g[4], const G1& S1, const G1& T1, const G2& S2, const G2& T2) + { + /* + (S1, T1) x (S2, T2) = (e(S1, S2), e(S1, T2), e(T1, S2), e(T1, T2)) + */ + tensorProductML(g,S1, T1, S2,T2); + finalExp4(g, g); + } + template<class Tag, size_t n> + struct ZkpT : public fp::Serializable<ZkpT<Tag, n> > { + Fr d_[n]; + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + for (size_t i = 0; i < n; i++) { + d_[i].load(pb, is, ioMode); if (!*pb) return; + } + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + d_[0].save(pb, os, ioMode); if (!*pb) return; + for (size_t i = 1; i < n; i++) { + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + d_[i].save(pb, os, ioMode); + } + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:ZkpT:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:ZkpT:save"); + } + friend std::istream& operator>>(std::istream& 
is, ZkpT& self) + { + self.load(is, fp::detectIoMode(Fr::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const ZkpT& self) + { + self.save(os, fp::detectIoMode(Fr::getIoMode(), os)); + return os; + } + }; + struct ZkpBinTag; + struct ZkpEqTag; // d_[] = { c, sp, ss, sm } + struct ZkpBinEqTag; // d_[] = { d0, d1, sp0, sp1, ss, sp, sm } +public: + /* + Zkp for m = 0 or 1 + */ + typedef ZkpT<ZkpBinTag, 4> ZkpBin; + /* + Zkp for decG1(c1) == decG2(c2) + */ + typedef ZkpT<ZkpEqTag, 4> ZkpEq; + /* + Zkp for (m = 0 or 1) and decG1(c1) == decG2(c2) + */ + typedef ZkpT<ZkpBinEqTag, 7> ZkpBinEq; + + typedef CipherTextAT<G1> CipherTextG1; + typedef CipherTextAT<G2> CipherTextG2; + + static void init(const mcl::CurveParam& cp = mcl::BN254, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) + { + initPairing(cp); + hashAndMapToG1(P_, "0"); + hashAndMapToG2(Q_, "0"); + pairing(ePQ_, P_, Q_); + precomputeG2(Qcoeff_, Q_); + setRangeForDLP(hashSize); + useDecG1ViaGT_ = false; + useDecG2ViaGT_ = false; + isG1only_ = false; + setTryNum(tryNum); + } + static void init(size_t hashSize, size_t tryNum = local::defaultTryNum) + { + init(mcl::BN254, hashSize, tryNum); + } + /* + standard lifted ElGamal encryption + */ + static void initG1only(const mcl::EcParam& para, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) + { + Fp::init(para.p); + Fr::init(para.n); + G1::init(para.a, para.b); + const Fp x0(para.gx); + const Fp y0(para.gy); + P_.set(x0, y0); + + setRangeForG1DLP(hashSize); + useDecG1ViaGT_ = false; + useDecG2ViaGT_ = false; + isG1only_ = true; + setTryNum(tryNum); + } + /* + set range for G1-DLP + */ + static void setRangeForG1DLP(size_t hashSize) + { + PhashTbl_.init(P_, hashSize); + } + /* + set range for G2-DLP + */ + static void setRangeForG2DLP(size_t hashSize) + { + QhashTbl_.init(Q_, hashSize); + } + /* + set range for GT-DLP + */ + static void setRangeForGTDLP(size_t hashSize) + { + ePQhashTbl_.init(ePQ_, hashSize); + } + /* + set range for G1/G2/GT DLP + decode message m for |m| <= hasSize * tryNum + decode time = O(log(hasSize) * tryNum) + */ + static void setRangeForDLP(size_t hashSize) + { + setRangeForG1DLP(hashSize); + setRangeForG2DLP(hashSize); + setRangeForGTDLP(hashSize); + } + static void setTryNum(size_t tryNum) + { + PhashTbl_.setTryNum(tryNum); + QhashTbl_.setTryNum(tryNum); + ePQhashTbl_.setTryNum(tryNum); + } + static void useDecG1ViaGT(bool use = true) + { + useDecG1ViaGT_ = use; + } + static void useDecG2ViaGT(bool use = true) + { + useDecG2ViaGT_ = use; + } + /* + only one element is necessary for each G1 and G2. 
+ this is better than David Mandell Freeman's algorithm + */ + class SecretKey : public fp::Serializable<SecretKey> { + Fr x_, y_; + void getPowOfePQ(GT& v, const CipherTextGT& c) const + { + /* + (s, t, u, v) := (e(S, S'), e(S, T'), e(T, S'), e(T, T')) + s v^(xy) / (t^y u^x) = s (v^x / t) ^ y / u^x + = e(P, Q)^(mm') + */ + GT t, u; + GT::unitaryInv(t, c.g_[1]); + GT::unitaryInv(u, c.g_[2]); + GT::pow(v, c.g_[3], x_); + v *= t; + GT::pow(v, v, y_); + GT::pow(u, u, x_); + v *= u; + v *= c.g_[0]; + } + public: + void setByCSPRNG() + { + x_.setRand(); + if (!isG1only_) y_.setRand(); + } + /* + set xP and yQ + */ + void getPublicKey(PublicKey& pub) const + { + pub.set(x_, y_); + } +#if 0 + // log_x(y) + int log(const GT& x, const GT& y) const + { + if (y == 1) return 0; + if (y == x) return 1; + GT inv; + GT::unitaryInv(inv, x); + if (y == inv) return -1; + GT t = x; + for (int i = 2; i < 100; i++) { + t *= x; + if (y == t) return i; + GT::unitaryInv(inv, t); + if (y == inv) return -i; + } + throw cybozu::Exception("she:dec:log:not found"); + } +#endif + int64_t dec(const CipherTextG1& c) const + { + if (useDecG1ViaGT_) return decViaGT(c); + /* + S = mP + rxP + T = rP + R = S - xT = mP + */ + G1 R; + G1::mul(R, c.T_, x_); + G1::sub(R, c.S_, R); + return PhashTbl_.log(R); + } + int64_t dec(const CipherTextG2& c) const + { + if (useDecG2ViaGT_) return decViaGT(c); + G2 R; + G2::mul(R, c.T_, y_); + G2::sub(R, c.S_, R); + return QhashTbl_.log(R); + } + int64_t dec(const CipherTextA& c) const + { + return dec(c.c1_); + } + int64_t dec(const CipherTextGT& c) const + { + GT v; + getPowOfePQ(v, c); + return ePQhashTbl_.log(v); +// return log(g, v); + } + int64_t decViaGT(const CipherTextG1& c) const + { + G1 R; + G1::mul(R, c.T_, x_); + G1::sub(R, c.S_, R); + GT v; + pairing(v, R, Q_); + return ePQhashTbl_.log(v); + } + int64_t decViaGT(const CipherTextG2& c) const + { + G2 R; + G2::mul(R, c.T_, y_); + G2::sub(R, c.S_, R); + GT v; + pairing(v, P_, R); + return ePQhashTbl_.log(v); + } + int64_t dec(const CipherText& c) const + { + if (c.isMultiplied()) { + return dec(c.m_); + } else { + return dec(c.a_); + } + } + bool isZero(const CipherTextG1& c) const + { + return c.isZero(x_); + } + bool isZero(const CipherTextG2& c) const + { + return c.isZero(y_); + } + bool isZero(const CipherTextA& c) const + { + return c.c1_.isZero(x_); + } + bool isZero(const CipherTextGT& c) const + { + GT v; + getPowOfePQ(v, c); + return v.isOne(); + } + bool isZero(const CipherText& c) const + { + if (c.isMultiplied()) { + return isZero(c.m_); + } else { + return isZero(c.a_); + } + } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + x_.load(pb, is, ioMode); if (!*pb) return; + if (!isG1only_) y_.load(pb, is, ioMode); + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + x_.save(pb, os, ioMode); if (!*pb) return; + if (isG1only_) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + y_.save(os, ioMode); + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:SecretKey:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:SecretKey:save"); + } + friend std::istream& operator>>(std::istream& is, SecretKey& 
self) + { + self.load(is, fp::detectIoMode(Fr::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const SecretKey& self) + { + self.save(os, fp::detectIoMode(Fr::getIoMode(), os)); + return os; + } + bool operator==(const SecretKey& rhs) const + { + return x_ == rhs.x_ && (isG1only_ || y_ == rhs.y_); + } + bool operator!=(const SecretKey& rhs) const { return !operator==(rhs); } + }; +private: + /* + simple ElGamal encryptionfor G1 and G2 + (S, T) = (m P + r xP, rP) + Pmul.mul(X, a) // X = a P + xPmul.mul(X, a) // X = a xP + use *encRand if encRand is not null + */ + template<class G, class INT, class MulG, class I> + static void ElGamalEnc(G& S, G& T, const INT& m, const mcl::fp::WindowMethod<I>& Pmul, const MulG& xPmul, const Fr *encRand = 0) + { + Fr r; + if (encRand) { + r = *encRand; + } else { + r.setRand(); + } + Pmul.mul(static_cast<I&>(T), r); + xPmul.mul(S, r); // S = r xP + if (m == 0) return; + G C; + Pmul.mul(static_cast<I&>(C), m); + S += C; + } + /* + https://github.com/herumi/mcl/blob/master/misc/she/nizkp.pdf + + encRand is a random value used for ElGamalEnc() + d[1-m] ; rand + s[1-m] ; rand + R[0][1-m] = s[1-m] P - d[1-m] T + R[1][1-m] = s[1-m] xP - d[1-m] (S - (1-m) P) + r ; rand + R[0][m] = r P + R[1][m] = r xP + c = H(S, T, R[0][0], R[0][1], R[1][0], R[1][1]) + d[m] = c - d[1-m] + s[m] = r + d[m] encRand + */ + template<class G, class I, class MulG> + static void makeZkpBin(ZkpBin& zkp, const G& S, const G& T, const Fr& encRand, const G& P, int m, const mcl::fp::WindowMethod<I>& Pmul, const MulG& xPmul) + { + if (m != 0 && m != 1) throw cybozu::Exception("makeZkpBin:bad m") << m; + Fr *s = &zkp.d_[0]; + Fr *d = &zkp.d_[2]; + G R[2][2]; + d[1-m].setRand(); + s[1-m].setRand(); + G T1, T2; + Pmul.mul(static_cast<I&>(T1), s[1-m]); // T1 = s[1-m] P + G::mul(T2, T, d[1-m]); + G::sub(R[0][1-m], T1, T2); // s[1-m] P - d[1-m]T + xPmul.mul(T1, s[1-m]); // T1 = s[1-m] xP + if (m == 0) { + G::sub(T2, S, P); + G::mul(T2, T2, d[1-m]); + } else { + G::mul(T2, S, d[1-m]); + } + G::sub(R[1][1-m], T1, T2); // s[1-m] xP - d[1-m](S - (1-m) P) + Fr r; + r.setRand(); + Pmul.mul(static_cast<I&>(R[0][m]), r); // R[0][m] = r P + xPmul.mul(R[1][m], r); // R[1][m] = r xP + char buf[sizeof(G) * 2]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S.save(os); + T.save(os); + R[0][0].save(os); + R[0][1].save(os); + R[1][0].save(os); + R[1][1].save(os); + Fr c; + c.setHashOf(buf, os.getPos()); + d[m] = c - d[1-m]; + s[m] = r + d[m] * encRand; + } + /* + R[0][i] = s[i] P - d[i] T ; i = 0,1 + R[1][0] = s[0] xP - d[0] S + R[1][1] = s[1] xP - d[1](S - P) + c = H(S, T, R[0][0], R[0][1], R[1][0], R[1][1]) + c == d[0] + d[1] + */ + template<class G, class I, class MulG> + static bool verifyZkpBin(const G& S, const G& T, const G& P, const ZkpBin& zkp, const mcl::fp::WindowMethod<I>& Pmul, const MulG& xPmul) + { + const Fr *s = &zkp.d_[0]; + const Fr *d = &zkp.d_[2]; + G R[2][2]; + G T1, T2; + for (int i = 0; i < 2; i++) { + Pmul.mul(static_cast<I&>(T1), s[i]); // T1 = s[i] P + G::mul(T2, T, d[i]); + G::sub(R[0][i], T1, T2); + } + xPmul.mul(T1, s[0]); // T1 = s[0] xP + G::mul(T2, S, d[0]); + G::sub(R[1][0], T1, T2); + xPmul.mul(T1, s[1]); // T1 = x[1] xP + G::sub(T2, S, P); + G::mul(T2, T2, d[1]); + G::sub(R[1][1], T1, T2); + char buf[sizeof(G) * 2]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S.save(os); + T.save(os); + R[0][0].save(os); + R[0][1].save(os); + R[1][0].save(os); + R[1][1].save(os); + Fr c; + c.setHashOf(buf, os.getPos()); + return c == d[0] + 
d[1]; + } + /* + encRand1, encRand2 are random values use for ElGamalEnc() + */ + template<class G1, class G2, class INT, class I1, class I2, class MulG1, class MulG2> + static void makeZkpEq(ZkpEq& zkp, G1& S1, G1& T1, G2& S2, G2& T2, const INT& m, const mcl::fp::WindowMethod<I1>& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod<I2>& Qmul, const MulG2& yQmul) + { + Fr p, s; + p.setRand(); + s.setRand(); + ElGamalEnc(S1, T1, m, Pmul, xPmul, &p); + ElGamalEnc(S2, T2, m, Qmul, yQmul, &s); + Fr rp, rs, rm; + rp.setRand(); + rs.setRand(); + rm.setRand(); + G1 R1, R2; + G2 R3, R4; + ElGamalEnc(R1, R2, rm, Pmul, xPmul, &rp); + ElGamalEnc(R3, R4, rm, Qmul, yQmul, &rs); + char buf[sizeof(G1) * 4 + sizeof(G2) * 4]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S1.save(os); + T1.save(os); + S2.save(os); + T2.save(os); + R1.save(os); + R2.save(os); + R3.save(os); + R4.save(os); + Fr& c = zkp.d_[0]; + Fr& sp = zkp.d_[1]; + Fr& ss = zkp.d_[2]; + Fr& sm = zkp.d_[3]; + c.setHashOf(buf, os.getPos()); + Fr::mul(sp, c, p); + sp += rp; + Fr::mul(ss, c, s); + ss += rs; + Fr::mul(sm, c, m); + sm += rm; + } + template<class G1, class G2, class I1, class I2, class MulG1, class MulG2> + static bool verifyZkpEq(const ZkpEq& zkp, const G1& S1, const G1& T1, const G2& S2, const G2& T2, const mcl::fp::WindowMethod<I1>& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod<I2>& Qmul, const MulG2& yQmul) + { + const Fr& c = zkp.d_[0]; + const Fr& sp = zkp.d_[1]; + const Fr& ss = zkp.d_[2]; + const Fr& sm = zkp.d_[3]; + G1 R1, R2, X1; + G2 R3, R4, X2; + ElGamalEnc(R1, R2, sm, Pmul, xPmul, &sp); + G1::mul(X1, S1, c); + R1 -= X1; + G1::mul(X1, T1, c); + R2 -= X1; + ElGamalEnc(R3, R4, sm, Qmul, yQmul, &ss); + G2::mul(X2, S2, c); + R3 -= X2; + G2::mul(X2, T2, c); + R4 -= X2; + char buf[sizeof(G1) * 4 + sizeof(G2) * 4]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S1.save(os); + T1.save(os); + S2.save(os); + T2.save(os); + R1.save(os); + R2.save(os); + R3.save(os); + R4.save(os); + Fr c2; + c2.setHashOf(buf, os.getPos()); + return c == c2; + } + /* + encRand1, encRand2 are random values use for ElGamalEnc() + */ + template<class G1, class G2, class I1, class I2, class MulG1, class MulG2> + static void makeZkpBinEq(ZkpBinEq& zkp, G1& S1, G1& T1, G2& S2, G2& T2, int m, const mcl::fp::WindowMethod<I1>& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod<I2>& Qmul, const MulG2& yQmul) + { + if (m != 0 && m != 1) throw cybozu::Exception("makeZkpBinEq:bad m") << m; + Fr *d = &zkp.d_[0]; + Fr *spm = &zkp.d_[2]; + Fr& ss = zkp.d_[4]; + Fr& sp = zkp.d_[5]; + Fr& sm = zkp.d_[6]; + Fr p, s; + p.setRand(); + s.setRand(); + ElGamalEnc(S1, T1, m, Pmul, xPmul, &p); + ElGamalEnc(S2, T2, m, Qmul, yQmul, &s); + d[1-m].setRand(); + spm[1-m].setRand(); + G1 R1[2], R2[2], X1; + Pmul.mul(static_cast<I1&>(R1[1-m]), spm[1-m]); + G1::mul(X1, T1, d[1-m]); + R1[1-m] -= X1; + if (m == 0) { + G1::sub(X1, S1, P_); + G1::mul(X1, X1, d[1-m]); + } else { + G1::mul(X1, S1, d[1-m]); + } + xPmul.mul(R2[1-m], spm[1-m]); + R2[1-m] -= X1; + Fr rpm, rp, rs, rm; + rpm.setRand(); + rp.setRand(); + rs.setRand(); + rm.setRand(); + ElGamalEnc(R2[m], R1[m], 0, Pmul, xPmul, &rpm); + G1 R3, R4; + G2 R5, R6; + ElGamalEnc(R4, R3, rm, Pmul, xPmul, &rp); + ElGamalEnc(R6, R5, rm, Qmul, yQmul, &rs); + char buf[sizeof(Fr) * 12]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S1.save(os); + T1.save(os); + R1[0].save(os); + R1[1].save(os); + R2[0].save(os); + R2[1].save(os); + R3.save(os); + R4.save(os); + R5.save(os); + R6.save(os); + Fr c; + 
c.setHashOf(buf, os.getPos()); + Fr::sub(d[m], c, d[1-m]); + Fr::mul(spm[m], d[m], p); + spm[m] += rpm; + Fr::mul(sp, c, p); + sp += rp; + Fr::mul(ss, c, s); + ss += rs; + Fr::mul(sm, c, m); + sm += rm; + } + template<class G1, class G2, class I1, class I2, class MulG1, class MulG2> + static bool verifyZkpBinEq(const ZkpBinEq& zkp, const G1& S1, const G1& T1, const G2& S2, const G2& T2, const mcl::fp::WindowMethod<I1>& Pmul, const MulG1& xPmul, const mcl::fp::WindowMethod<I2>& Qmul, const MulG2& yQmul) + { + const Fr *d = &zkp.d_[0]; + const Fr *spm = &zkp.d_[2]; + const Fr& ss = zkp.d_[4]; + const Fr& sp = zkp.d_[5]; + const Fr& sm = zkp.d_[6]; + G1 R1[2], R2[2], X1; + for (int i = 0; i < 2; i++) { + Pmul.mul(static_cast<I1&>(R1[i]), spm[i]); + G1::mul(X1, T1, d[i]); + R1[i] -= X1; + } + xPmul.mul(R2[0], spm[0]); + G1::mul(X1, S1, d[0]); + R2[0] -= X1; + xPmul.mul(R2[1], spm[1]); + G1::sub(X1, S1, P_); + G1::mul(X1, X1, d[1]); + R2[1] -= X1; + Fr c; + Fr::add(c, d[0], d[1]); + G1 R3, R4; + G2 R5, R6; + ElGamalEnc(R4, R3, sm, Pmul, xPmul, &sp); + G1::mul(X1, T1, c); + R3 -= X1; + G1::mul(X1, S1, c); + R4 -= X1; + ElGamalEnc(R6, R5, sm, Qmul, yQmul, &ss); + G2 X2; + G2::mul(X2, T2, c); + R5 -= X2; + G2::mul(X2, S2, c); + R6 -= X2; + char buf[sizeof(Fr) * 12]; + cybozu::MemoryOutputStream os(buf, sizeof(buf)); + S1.save(os); + T1.save(os); + R1[0].save(os); + R1[1].save(os); + R2[0].save(os); + R2[1].save(os); + R3.save(os); + R4.save(os); + R5.save(os); + R6.save(os); + Fr c2; + c2.setHashOf(buf, os.getPos()); + return c == c2; + } + /* + common method for PublicKey and PrecomputedPublicKey + */ + template<class T> + struct PublicKeyMethod { + /* + you can use INT as int64_t and Fr, + but the return type of dec() is int64_t. + */ + template<class INT> + void enc(CipherTextG1& c, const INT& m) const + { + static_cast<const T&>(*this).encG1(c, m); + } + template<class INT> + void enc(CipherTextG2& c, const INT& m) const + { + static_cast<const T&>(*this).encG2(c, m); + } + template<class INT> + void enc(CipherTextA& c, const INT& m) const + { + enc(c.c1_, m); + enc(c.c2_, m); + } + template<class INT> + void enc(CipherTextGT& c, const INT& m) const + { + static_cast<const T&>(*this).encGT(c, m); + } + template<class INT> + void enc(CipherText& c, const INT& m, bool multiplied = false) const + { + c.isMultiplied_ = multiplied; + if (multiplied) { + enc(c.m_, m); + } else { + enc(c.a_, m); + } + } + /* + reRand method is for circuit privacy + */ + template<class CT> + void reRandT(CT& c) const + { + CT c0; + static_cast<const T&>(*this).enc(c0, 0); + CT::add(c, c, c0); + } + void reRand(CipherTextG1& c) const { reRandT(c); } + void reRand(CipherTextG2& c) const { reRandT(c); } + void reRand(CipherTextGT& c) const { reRandT(c); } + void reRand(CipherText& c) const + { + if (c.isMultiplied()) { + reRandT(c.m_); + } else { + reRandT(c.a_); + } + } + /* + convert from CipherTextG1 to CipherTextGT + */ + void convert(CipherTextGT& cm, const CipherTextG1& c1) const + { + /* + Enc(1) = (S, T) = (Q + r yQ, rQ) = (Q, 0) if r = 0 + cm = c1 * (Q, 0) = (S, T) * (Q, 0) = (e(S, Q), 1, e(T, Q), 1) + */ + precomputedMillerLoop(cm.g_[0], c1.getS(), Qcoeff_); + finalExp(cm.g_[0], cm.g_[0]); + precomputedMillerLoop(cm.g_[2], c1.getT(), Qcoeff_); + finalExp(cm.g_[2], cm.g_[2]); + + cm.g_[1] = cm.g_[3] = 1; + } + /* + convert from CipherTextG2 to CipherTextGT + */ + void convert(CipherTextGT& cm, const CipherTextG2& c2) const + { + /* + Enc(1) = (S, T) = (P + r xP, rP) = (P, 0) if r = 0 + cm = (P, 0) * c2 = (e(P, 
S), e(P, T), 1, 1) + */ + pairing(cm.g_[0], P_, c2.getS()); + pairing(cm.g_[1], P_, c2.getT()); + cm.g_[2] = cm.g_[3] = 1; + } + void convert(CipherTextGT& cm, const CipherTextA& ca) const + { + convert(cm, ca.c1_); + } + void convert(CipherText& cm, const CipherText& ca) const + { + if (ca.isMultiplied()) throw cybozu::Exception("she:PublicKey:convertCipherText:already isMultiplied"); + cm.isMultiplied_ = true; + convert(cm.m_, ca.a_); + } + }; +public: + class PublicKey : public fp::Serializable<PublicKey, + PublicKeyMethod<PublicKey> > { + G1 xP_; + G2 yQ_; + friend class SecretKey; + friend class PrecomputedPublicKey; + template<class T> + friend struct PublicKeyMethod; + template<class G> + struct MulG { + const G& base; + MulG(const G& base) : base(base) {} + template<class INT> + void mul(G& out, const INT& m) const + { + G::mul(out, base, m); + } + }; + void set(const Fr& x, const Fr& y) + { + G1::mul(xP_, P_, x); + if (!isG1only_) G2::mul(yQ_, Q_, y); + } + template<class INT> + void encG1(CipherTextG1& c, const INT& m) const + { + const MulG<G1> xPmul(xP_); + ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPmul); + } + template<class INT> + void encG2(CipherTextG2& c, const INT& m) const + { + const MulG<G2> yQmul(yQ_); + ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQmul); + } +public: + void encWithZkpBin(CipherTextG1& c, ZkpBin& zkp, int m) const + { + Fr encRand; + encRand.setRand(); + const MulG<G1> xPmul(xP_); + ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPmul, &encRand); + makeZkpBin(zkp, c.S_, c.T_, encRand, P_, m, PhashTbl_.getWM(), xPmul); + } + void encWithZkpBin(CipherTextG2& c, ZkpBin& zkp, int m) const + { + Fr encRand; + encRand.setRand(); + const MulG<G2> yQmul(yQ_); + ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQmul, &encRand); + makeZkpBin(zkp, c.S_, c.T_, encRand, Q_, m, QhashTbl_.getWM(), yQmul); + } + bool verify(const CipherTextG1& c, const ZkpBin& zkp) const + { + const MulG<G1> xPmul(xP_); + return verifyZkpBin(c.S_, c.T_, P_, zkp, PhashTbl_.getWM(), xPmul); + } + bool verify(const CipherTextG2& c, const ZkpBin& zkp) const + { + const MulG<G2> yQmul(yQ_); + return verifyZkpBin(c.S_, c.T_, Q_, zkp, QhashTbl_.getWM(), yQmul); + } + template<class INT> + void encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const + { + const MulG<G1> xPmul(xP_); + const MulG<G2> yQmul(yQ_); + makeZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); + } + bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpEq& zkp) const + { + const MulG<G1> xPmul(xP_); + const MulG<G2> yQmul(yQ_); + return verifyZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); + } + void encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const + { + const MulG<G1> xPmul(xP_); + const MulG<G2> yQmul(yQ_); + makeZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); + } + bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpBinEq& zkp) const + { + const MulG<G1> xPmul(xP_); + const MulG<G2> yQmul(yQ_); + return verifyZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPmul, QhashTbl_.getWM(), yQmul); + } + template<class INT> + void encGT(CipherTextGT& c, const INT& m) const + { + /* + (s, t, u, v) = ((e^x)^a (e^y)^b (e^-xy)^c e^m, e^b, e^a, e^c) + s = e(a xP + m P, Q)e(b P - c xP, yQ) + */ + Fr ra, rb, rc; + ra.setRand(); + rb.setRand(); + rc.setRand(); + GT e; + + G1 P1, P2; + G1::mul(P1, 
xP_, ra); + if (m) { +// G1::mul(P2, P, m); + PhashTbl_.mulByWindowMethod(P2, m); + P1 += P2; + } +// millerLoop(c.g[0], P1, Q); + precomputedMillerLoop(c.g_[0], P1, Qcoeff_); +// G1::mul(P1, P, rb); + PhashTbl_.mulByWindowMethod(P1, rb); + G1::mul(P2, xP_, rc); + P1 -= P2; + millerLoop(e, P1, yQ_); + c.g_[0] *= e; + finalExp(c.g_[0], c.g_[0]); +#if 1 + ePQhashTbl_.mulByWindowMethod(c.g_[1], rb); + ePQhashTbl_.mulByWindowMethod(c.g_[2], ra); + ePQhashTbl_.mulByWindowMethod(c.g_[3], rc); +#else + GT::pow(c.g_[1], ePQ_, rb); + GT::pow(c.g_[2], ePQ_, ra); + GT::pow(c.g_[3], ePQ_, rc); +#endif + } + public: + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + xP_.load(pb, is, ioMode); if (!*pb) return; + if (!isG1only_) yQ_.load(pb, is, ioMode); + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + xP_.save(pb, os, ioMode); if (!*pb) return; + if (isG1only_) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + yQ_.save(pb, os, ioMode); + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:PublicKey:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:PublicKey:save"); + } + friend std::istream& operator>>(std::istream& is, PublicKey& self) + { + self.load(is, fp::detectIoMode(G1::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const PublicKey& self) + { + self.save(os, fp::detectIoMode(G1::getIoMode(), os)); + return os; + } + bool operator==(const PublicKey& rhs) const + { + return xP_ == rhs.xP_ && (isG1only_ || yQ_ == rhs.yQ_); + } + bool operator!=(const PublicKey& rhs) const { return !operator==(rhs); } + }; + + class PrecomputedPublicKey : public fp::Serializable<PrecomputedPublicKey, + PublicKeyMethod<PrecomputedPublicKey> > { + typedef local::InterfaceForHashTable<GT, false> GTasEC; + typedef mcl::fp::WindowMethod<GTasEC> GTwin; + template<class T> + friend struct PublicKeyMethod; + GT exPQ_; + GT eyPQ_; + GT exyPQ_; + GTwin exPQwm_; + GTwin eyPQwm_; + GTwin exyPQwm_; + mcl::fp::WindowMethod<G1> xPwm_; + mcl::fp::WindowMethod<G2> yQwm_; + template<class T> + void mulByWindowMethod(GT& x, const GTwin& wm, const T& y) const + { + wm.mul(static_cast<GTasEC&>(x), y); + } + template<class INT> + void encG1(CipherTextG1& c, const INT& m) const + { + ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPwm_); + } + template<class INT> + void encG2(CipherTextG2& c, const INT& m) const + { + ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQwm_); + } + template<class INT> + void encGT(CipherTextGT& c, const INT& m) const + { + /* + (s, t, u, v) = (e^m e^(xya), (e^x)^b, (e^y)^c, e^(b + c - a)) + */ + Fr ra, rb, rc; + ra.setRand(); + rb.setRand(); + rc.setRand(); + GT t; + ePQhashTbl_.mulByWindowMethod(c.g_[0], m); // e^m + mulByWindowMethod(t, exyPQwm_, ra); // (e^xy)^a + c.g_[0] *= t; + mulByWindowMethod(c.g_[1], exPQwm_, rb); // (e^x)^b + mulByWindowMethod(c.g_[2], eyPQwm_, rc); // (e^y)^c + rb += rc; + rb -= ra; + ePQhashTbl_.mulByWindowMethod(c.g_[3], rb); + } + public: + void init(const PublicKey& pub) + { + const size_t bitSize = Fr::getBitSize(); + xPwm_.init(pub.xP_, bitSize, local::winSize); + if (isG1only_) return; + yQwm_.init(pub.yQ_, 
bitSize, local::winSize); + pairing(exPQ_, pub.xP_, Q_); + pairing(eyPQ_, P_, pub.yQ_); + pairing(exyPQ_, pub.xP_, pub.yQ_); + exPQwm_.init(static_cast<const GTasEC&>(exPQ_), bitSize, local::winSize); + eyPQwm_.init(static_cast<const GTasEC&>(eyPQ_), bitSize, local::winSize); + exyPQwm_.init(static_cast<const GTasEC&>(exyPQ_), bitSize, local::winSize); + } + void encWithZkpBin(CipherTextG1& c, ZkpBin& zkp, int m) const + { + Fr encRand; + encRand.setRand(); + ElGamalEnc(c.S_, c.T_, m, PhashTbl_.getWM(), xPwm_, &encRand); + makeZkpBin(zkp, c.S_, c.T_, encRand, P_, m, PhashTbl_.getWM(), xPwm_); + } + void encWithZkpBin(CipherTextG2& c, ZkpBin& zkp, int m) const + { + Fr encRand; + encRand.setRand(); + ElGamalEnc(c.S_, c.T_, m, QhashTbl_.getWM(), yQwm_, &encRand); + makeZkpBin(zkp, c.S_, c.T_, encRand, Q_, m, QhashTbl_.getWM(), yQwm_); + } + bool verify(const CipherTextG1& c, const ZkpBin& zkp) const + { + return verifyZkpBin(c.S_, c.T_, P_, zkp, PhashTbl_.getWM(), xPwm_); + } + bool verify(const CipherTextG2& c, const ZkpBin& zkp) const + { + return verifyZkpBin(c.S_, c.T_, Q_, zkp, QhashTbl_.getWM(), yQwm_); + } + template<class INT> + void encWithZkpEq(CipherTextG1& c1, CipherTextG2& c2, ZkpEq& zkp, const INT& m) const + { + makeZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); + } + bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpEq& zkp) const + { + return verifyZkpEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); + } + void encWithZkpBinEq(CipherTextG1& c1, CipherTextG2& c2, ZkpBinEq& zkp, int m) const + { + makeZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, m, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); + } + bool verify(const CipherTextG1& c1, const CipherTextG2& c2, const ZkpBinEq& zkp) const + { + return verifyZkpBinEq(zkp, c1.S_, c1.T_, c2.S_, c2.T_, PhashTbl_.getWM(), xPwm_, QhashTbl_.getWM(), yQwm_); + } + }; + class CipherTextA { + CipherTextG1 c1_; + CipherTextG2 c2_; + friend class SecretKey; + friend class PublicKey; + friend class CipherTextGT; + template<class T> + friend struct PublicKeyMethod; + public: + void clear() + { + c1_.clear(); + c2_.clear(); + } + static void add(CipherTextA& z, const CipherTextA& x, const CipherTextA& y) + { + CipherTextG1::add(z.c1_, x.c1_, y.c1_); + CipherTextG2::add(z.c2_, x.c2_, y.c2_); + } + static void sub(CipherTextA& z, const CipherTextA& x, const CipherTextA& y) + { + CipherTextG1::sub(z.c1_, x.c1_, y.c1_); + CipherTextG2::sub(z.c2_, x.c2_, y.c2_); + } + static void mul(CipherTextA& z, const CipherTextA& x, int64_t y) + { + CipherTextG1::mul(z.c1_, x.c1_, y); + CipherTextG2::mul(z.c2_, x.c2_, y); + } + static void neg(CipherTextA& y, const CipherTextA& x) + { + CipherTextG1::neg(y.c1_, x.c1_); + CipherTextG2::neg(y.c2_, x.c2_); + } + void add(const CipherTextA& c) { add(*this, *this, c); } + void sub(const CipherTextA& c) { sub(*this, *this, c); } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + c1_.load(pb, is, ioMode); if (!*pb) return; + c2_.load(pb, is, ioMode); + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + c1_.save(pb, os, ioMode); if (!*pb) return; + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + c2_.save(pb, os, ioMode); + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, 
is, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextA:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextA:save"); + } + friend std::istream& operator>>(std::istream& is, CipherTextA& self) + { + self.load(is, fp::detectIoMode(G1::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const CipherTextA& self) + { + self.save(os, fp::detectIoMode(G1::getIoMode(), os)); + return os; + } + bool operator==(const CipherTextA& rhs) const + { + return c1_ == rhs.c1_ && c2_ == rhs.c2_; + } + bool operator!=(const CipherTextA& rhs) const { return !operator==(rhs); } + }; + + class CipherTextGT : public fp::Serializable<CipherTextGT> { + GT g_[4]; + friend class SecretKey; + friend class PublicKey; + friend class PrecomputedPublicKey; + friend class CipherTextA; + template<class T> + friend struct PublicKeyMethod; + public: + void clear() + { + for (int i = 0; i < 4; i++) { + g_[i].setOne(); + } + } + static void neg(CipherTextGT& y, const CipherTextGT& x) + { + for (int i = 0; i < 4; i++) { + GT::unitaryInv(y.g_[i], x.g_[i]); + } + } + static void add(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) + { + /* + (g[i]) + (g'[i]) = (g[i] * g'[i]) + */ + for (int i = 0; i < 4; i++) { + GT::mul(z.g_[i], x.g_[i], y.g_[i]); + } + } + static void sub(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) + { + /* + (g[i]) - (g'[i]) = (g[i] / g'[i]) + */ + GT t; + for (size_t i = 0; i < 4; i++) { + GT::unitaryInv(t, y.g_[i]); + GT::mul(z.g_[i], x.g_[i], t); + } + } + static void mulML(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y) + { + /* + (S1, T1) * (S2, T2) = (ML(S1, S2), ML(S1, T2), ML(T1, S2), ML(T1, T2)) + */ + tensorProductML(z.g_, x.S_, x.T_, y.S_, y.T_); + } + static void finalExp(CipherTextGT& y, const CipherTextGT& x) + { + finalExp4(y.g_, x.g_); + } + /* + mul(x, y) = mulML(x, y) + finalExp + mul(c11, c12) + mul(c21, c22) + = finalExp(mulML(c11, c12) + mulML(c21, c22)), + then one finalExp can be reduced + */ + static void mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y) + { + /* + (S1, T1) * (S2, T2) = (e(S1, S2), e(S1, T2), e(T1, S2), e(T1, T2)) + */ + mulML(z, x, y); + finalExp(z, z); + } + static void mul(CipherTextGT& z, const CipherTextA& x, const CipherTextA& y) + { + mul(z, x.c1_, y.c2_); + } + static void mul(CipherTextGT& z, const CipherTextGT& x, int64_t y) + { + for (int i = 0; i < 4; i++) { + GT::pow(z.g_[i], x.g_[i], y); + } + } + void add(const CipherTextGT& c) { add(*this, *this, c); } + void sub(const CipherTextGT& c) { sub(*this, *this, c); } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + for (int i = 0; i < 4; i++) { + g_[i].load(pb, is, ioMode); if (!*pb) return; + } + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + const char sep = *fp::getIoSeparator(ioMode); + g_[0].save(pb, os, ioMode); if (!*pb) return; + for (int i = 1; i < 4; i++) { + if (sep) { + cybozu::writeChar(pb, os, sep); + if (!*pb) return; + } + g_[i].save(pb, os, ioMode); if (!*pb) return; + } + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextGT:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = 
IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:CipherTextGT:save"); + } + friend std::istream& operator>>(std::istream& is, CipherTextGT& self) + { + self.load(is, fp::detectIoMode(G1::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const CipherTextGT& self) + { + self.save(os, fp::detectIoMode(G1::getIoMode(), os)); + return os; + } + bool operator==(const CipherTextGT& rhs) const + { + for (int i = 0; i < 4; i++) { + if (g_[i] != rhs.g_[i]) return false; + } + return true; + } + bool operator!=(const CipherTextGT& rhs) const { return !operator==(rhs); } + }; + + class CipherText : public fp::Serializable<CipherText> { + bool isMultiplied_; + CipherTextA a_; + CipherTextGT m_; + friend class SecretKey; + friend class PublicKey; + template<class T> + friend struct PublicKeyMethod; + public: + CipherText() : isMultiplied_(false) {} + void clearAsAdded() + { + isMultiplied_ = false; + a_.clear(); + } + void clearAsMultiplied() + { + isMultiplied_ = true; + m_.clear(); + } + bool isMultiplied() const { return isMultiplied_; } + static void add(CipherText& z, const CipherText& x, const CipherText& y) + { + if (x.isMultiplied() && y.isMultiplied()) { + z.isMultiplied_ = true; + CipherTextGT::add(z.m_, x.m_, y.m_); + return; + } + if (!x.isMultiplied() && !y.isMultiplied()) { + z.isMultiplied_ = false; + CipherTextA::add(z.a_, x.a_, y.a_); + return; + } + throw cybozu::Exception("she:CipherText:add:mixed CipherText"); + } + static void sub(CipherText& z, const CipherText& x, const CipherText& y) + { + if (x.isMultiplied() && y.isMultiplied()) { + z.isMultiplied_ = true; + CipherTextGT::sub(z.m_, x.m_, y.m_); + return; + } + if (!x.isMultiplied() && !y.isMultiplied()) { + z.isMultiplied_ = false; + CipherTextA::sub(z.a_, x.a_, y.a_); + return; + } + throw cybozu::Exception("she:CipherText:sub:mixed CipherText"); + } + static void neg(CipherText& y, const CipherText& x) + { + if (x.isMultiplied()) { + y.isMultiplied_ = true; + CipherTextGT::neg(y.m_, x.m_); + return; + } else { + y.isMultiplied_ = false; + CipherTextA::neg(y.a_, x.a_); + return; + } + } + static void mul(CipherText& z, const CipherText& x, const CipherText& y) + { + if (x.isMultiplied() || y.isMultiplied()) { + throw cybozu::Exception("she:CipherText:mul:mixed CipherText"); + } + z.isMultiplied_ = true; + CipherTextGT::mul(z.m_, x.a_, y.a_); + } + static void mul(CipherText& z, const CipherText& x, int64_t y) + { + if (x.isMultiplied()) { + CipherTextGT::mul(z.m_, x.m_, y); + } else { + CipherTextA::mul(z.a_, x.a_, y); + } + } + void add(const CipherText& c) { add(*this, *this, c); } + void sub(const CipherText& c) { sub(*this, *this, c); } + void mul(const CipherText& c) { mul(*this, *this, c); } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode = IoSerialize) + { + char c; + if (!cybozu::readChar(&c, is)) { + *pb = false; + return; + } + if (c == '0' || c == '1') { + isMultiplied_ = c == '0'; + } else { + *pb = false; + return; + } + if (isMultiplied()) { + m_.load(pb, is, ioMode); + } else { + a_.load(pb, is, ioMode); + } + } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int ioMode = IoSerialize) const + { + cybozu::writeChar(pb, os, isMultiplied_ ? '0' : '1'); if (!*pb) return; + if (isMultiplied()) { + m_.save(pb, os, ioMode); + } else { + a_.save(pb, os, ioMode); + } + } + template<class InputStream> + void load(InputStream& is, int ioMode = IoSerialize) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("she:CipherText:load"); + } + template<class OutputStream> + void save(OutputStream& os, int ioMode = IoSerialize) const + { + bool b; + save(&b, os, ioMode); + if (!b) throw cybozu::Exception("she:CipherText:save"); + } + friend std::istream& operator>>(std::istream& is, CipherText& self) + { + self.load(is, fp::detectIoMode(G1::getIoMode(), is)); + return is; + } + friend std::ostream& operator<<(std::ostream& os, const CipherText& self) + { + self.save(os, fp::detectIoMode(G1::getIoMode(), os)); + return os; + } + bool operator==(const CipherText& rhs) const + { + if (isMultiplied() != rhs.isMultiplied()) return false; + if (isMultiplied()) { + return m_ == rhs.m_; + } + return a_ == rhs.a_; + } + bool operator!=(const CipherText& rhs) const { return !operator==(rhs); } + }; +};
+typedef local::HashTable<G1> HashTableG1; +typedef local::HashTable<G2> HashTableG2; +typedef local::HashTable<Fp12, false> HashTableGT; + +template<size_t dummyInpl> G1 SHET<dummyInpl>::P_; +template<size_t dummyInpl> G2 SHET<dummyInpl>::Q_; +template<size_t dummyInpl> Fp12 SHET<dummyInpl>::ePQ_; +template<size_t dummyInpl> std::vector<Fp6> SHET<dummyInpl>::Qcoeff_; +template<size_t dummyInpl> HashTableG1 SHET<dummyInpl>::PhashTbl_; +template<size_t dummyInpl> HashTableG2 SHET<dummyInpl>::QhashTbl_; +template<size_t dummyInpl> HashTableGT SHET<dummyInpl>::ePQhashTbl_; +template<size_t dummyInpl> bool SHET<dummyInpl>::useDecG1ViaGT_; +template<size_t dummyInpl> bool SHET<dummyInpl>::useDecG2ViaGT_; +template<size_t dummyInpl> bool SHET<dummyInpl>::isG1only_; +typedef mcl::she::SHET<> SHE; +typedef SHE::SecretKey SecretKey; +typedef SHE::PublicKey PublicKey; +typedef SHE::PrecomputedPublicKey PrecomputedPublicKey; +typedef SHE::CipherTextG1 CipherTextG1; +typedef SHE::CipherTextG2 CipherTextG2; +typedef SHE::CipherTextGT CipherTextGT; +typedef SHE::CipherTextA CipherTextA; +typedef CipherTextGT CipherTextGM; // old class +typedef SHE::CipherText CipherText; +typedef SHE::ZkpBin ZkpBin; +typedef SHE::ZkpEq ZkpEq; +typedef SHE::ZkpBinEq ZkpBinEq; + +inline void init(const mcl::CurveParam& cp = mcl::BN254, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) +{ + SHE::init(cp, hashSize, tryNum); +} +inline void initG1only(const mcl::EcParam& para, size_t hashSize = 1024, size_t tryNum = local::defaultTryNum) +{ + SHE::initG1only(para, hashSize, tryNum); +} +inline void init(size_t hashSize, size_t tryNum = local::defaultTryNum) { SHE::init(hashSize, tryNum); } +inline void setRangeForG1DLP(size_t hashSize) { SHE::setRangeForG1DLP(hashSize); } +inline void setRangeForG2DLP(size_t hashSize) { SHE::setRangeForG2DLP(hashSize); } +inline void setRangeForGTDLP(size_t hashSize) { SHE::setRangeForGTDLP(hashSize); } +inline void setRangeForDLP(size_t hashSize) { SHE::setRangeForDLP(hashSize); } +inline void setTryNum(size_t tryNum) { SHE::setTryNum(tryNum); } +inline void 
useDecG1ViaGT(bool use = true) { SHE::useDecG1ViaGT(use); } +inline void useDecG2ViaGT(bool use = true) { SHE::useDecG2ViaGT(use); } +inline HashTableG1& getHashTableG1() { return SHE::PhashTbl_; } +inline HashTableG2& getHashTableG2() { return SHE::QhashTbl_; } +inline HashTableGT& getHashTableGT() { return SHE::ePQhashTbl_; } + +inline void add(CipherTextG1& z, const CipherTextG1& x, const CipherTextG1& y) { CipherTextG1::add(z, x, y); } +inline void add(CipherTextG2& z, const CipherTextG2& x, const CipherTextG2& y) { CipherTextG2::add(z, x, y); } +inline void add(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) { CipherTextGT::add(z, x, y); } +inline void add(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::add(z, x, y); } + +inline void sub(CipherTextG1& z, const CipherTextG1& x, const CipherTextG1& y) { CipherTextG1::sub(z, x, y); } +inline void sub(CipherTextG2& z, const CipherTextG2& x, const CipherTextG2& y) { CipherTextG2::sub(z, x, y); } +inline void sub(CipherTextGT& z, const CipherTextGT& x, const CipherTextGT& y) { CipherTextGT::sub(z, x, y); } +inline void sub(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::sub(z, x, y); } + +inline void neg(CipherTextG1& y, const CipherTextG1& x) { CipherTextG1::neg(y, x); } +inline void neg(CipherTextG2& y, const CipherTextG2& x) { CipherTextG2::neg(y, x); } +inline void neg(CipherTextGT& y, const CipherTextGT& x) { CipherTextGT::neg(y, x); } +inline void neg(CipherText& y, const CipherText& x) { CipherText::neg(y, x); } + +template<class INT> +inline void mul(CipherTextG1& z, const CipherTextG1& x, const INT& y) { CipherTextG1::mul(z, x, y); } +template<class INT> +inline void mul(CipherTextG2& z, const CipherTextG2& x, const INT& y) { CipherTextG2::mul(z, x, y); } +template<class INT> +inline void mul(CipherTextGT& z, const CipherTextGT& x, const INT& y) { CipherTextGT::mul(z, x, y); } +template<class INT> +inline void mul(CipherText& z, const CipherText& x, const INT& y) { CipherText::mul(z, x, y); } + +inline void mul(CipherTextGT& z, const CipherTextG1& x, const CipherTextG2& y) { CipherTextGT::mul(z, x, y); } +inline void mul(CipherText& z, const CipherText& x, const CipherText& y) { CipherText::mul(z, x, y); } + +} } // mcl::she + diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/util.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/util.hpp new file mode 100644 index 000000000..edef971cb --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/util.hpp @@ -0,0 +1,285 @@ +#pragma once +/** + @file + @brief functions for T[] + @author MITSUNARI Shigeo(@herumi) + @license modified new BSD license + http://opensource.org/licenses/BSD-3-Clause +*/ +#include <cybozu/bit_operation.hpp> + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable : 4456) + #pragma warning(disable : 4459) +#endif + +namespace mcl { namespace fp { + +template<class T> +T abs_(T x) { return x < 0 ? -x : x; } + +template<class T> +T min_(T x, T y) { return x < y ? x : y; } + +template<class T> +T max_(T x, T y) { return x < y ? y : x; } + +template<class T> +void swap_(T& x, T& y) +{ + T t; + t = x; + x = y; + y = t; +} + + +/* + get pp such that p * pp = -1 mod M, + where p is prime and M = 1 << 64(or 32). 
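+ i.e. pp = -p^(-1) mod M (the Montgomery constant)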
+ @param pLow [in] p mod M +*/ +template<class T> +T getMontgomeryCoeff(T pLow) +{ + T ret = 0; + T t = 0; + T x = 1; + for (size_t i = 0; i < sizeof(T) * 8; i++) { + if ((t & 1) == 0) { + t += pLow; + ret += x; + } + t >>= 1; + x <<= 1; + } + return ret; +} + +template<class T> +int compareArray(const T* x, const T* y, size_t n) +{ + for (size_t i = n - 1; i != size_t(-1); i--) { + T a = x[i]; + T b = y[i]; + if (a != b) return a < b ? -1 : 1; + } + return 0; +} + +template<class T> +bool isLessArray(const T *x, const T* y, size_t n) +{ + for (size_t i = n - 1; i != size_t(-1); i--) { + T a = x[i]; + T b = y[i]; + if (a != b) return a < b; + } + return false; +} + +template<class T> +bool isGreaterOrEqualArray(const T *x, const T* y, size_t n) +{ + return !isLessArray(x, y, n); +} + +template<class T> +bool isLessOrEqualArray(const T *x, const T* y, size_t n) +{ + for (size_t i = n - 1; i != size_t(-1); i--) { + T a = x[i]; + T b = y[i]; + if (a != b) return a < b; + } + return true; +} + +template<class T> +bool isGreaterArray(const T *x, const T* y, size_t n) +{ + return !isLessOrEqualArray(x, y, n); +} + +template<class T> +bool isEqualArray(const T* x, const T* y, size_t n) +{ + for (size_t i = 0; i < n; i++) { + if (x[i] != y[i]) return false; + } + return true; +} + +template<class T> +bool isZeroArray(const T *x, size_t n) +{ + for (size_t i = 0; i < n; i++) { + if (x[i]) return false; + } + return true; +} + +template<class T> +void clearArray(T *x, size_t begin, size_t end) +{ + for (size_t i = begin; i < end; i++) x[i] = 0; +} + +template<class T> +void copyArray(T *y, const T *x, size_t n) +{ + for (size_t i = 0; i < n; i++) y[i] = x[i]; +} + +/* + x &= (1 << bitSize) - 1 +*/ +template<class T> +void maskArray(T *x, size_t n, size_t bitSize) +{ + const size_t TbitSize = sizeof(T) * 8; + assert(bitSize <= TbitSize * n); + const size_t q = bitSize / TbitSize; + const size_t r = bitSize % TbitSize; + if (r) { + x[q] &= (T(1) << r) - 1; + clearArray(x, q + 1, n); + } else { + clearArray(x, q, n); + } +} + +/* + return non zero size of x[] + return 1 if x[] == 0 +*/ +template<class T> +size_t getNonZeroArraySize(const T *x, size_t n) +{ + assert(n > 0); + while (n > 0) { + if (x[n - 1]) return n; + n--; + } + return 1; +} + +/* + @param out [inout] : set element of G ; out = x^y[] + @param x [in] + @param y [in] + @param n [in] size of y[] + @param limitBit [in] const time version if the value is positive + @note &out != x and out = the unit element of G +*/ +template<class G, class Mul, class Sqr, class T> +void powGeneric(G& out, const G& x, const T *y, size_t n, const Mul& mul, const Sqr& sqr, void normalize(G&, const G&), size_t limitBit = 0) +{ + assert(&out != &x); + G tbl[4]; // tbl = { discard, x, x^2, x^3 } + T v; + bool constTime = limitBit > 0; + int maxBit = 0; + int m = 0; + while (n > 0) { + if (y[n - 1]) break; + n--; + } + if (n == 0) { + if (constTime) goto DummyLoop; + return; + } + if (!constTime && n == 1) { + switch (y[0]) { + case 1: + out = x; + return; + case 2: + sqr(out, x); + return; + case 3: + sqr(out, x); + mul(out, out, x); + return; + case 4: + sqr(out, x); + sqr(out, out); + return; + } + } + if (normalize != 0) { + normalize(tbl[0], x); + } else { + tbl[0] = x; + } + tbl[1] = tbl[0]; + sqr(tbl[2], tbl[1]); + if (normalize != 0) { normalize(tbl[2], tbl[2]); } + mul(tbl[3], tbl[2], x); + if (normalize != 0) { normalize(tbl[3], tbl[3]); } + v = y[n - 1]; + assert(v); + m = cybozu::bsr<T>(v); + maxBit = int(m + (n - 1) * sizeof(T) * 8); + if (m & 1) { + 
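// MSB index is odd, so start with a 2-bit window; the remaining bits then pair up evenly +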
m--; + T idx = (v >> m) & 3; + assert(idx > 0); + out = tbl[idx]; + } else { + out = x; + } + for (int i = (int)n - 1; i >= 0; i--) { + T v = y[i]; + for (int j = m - 2; j >= 0; j -= 2) { + sqr(out, out); + sqr(out, out); + T idx = (v >> j) & 3; + if (idx == 0) { + if (constTime) mul(tbl[0], tbl[0], tbl[1]); + } else { + mul(out, out, tbl[idx]); + } + } + m = (int)sizeof(T) * 8; + } +DummyLoop: + if (!constTime) return; + G D = out; + for (size_t i = maxBit + 1; i < limitBit; i += 2) { + sqr(D, D); + sqr(D, D); + mul(D, D, tbl[1]); + } +} + +/* + shortcut of multiplication by Unit +*/ +template<class T, class U> +bool mulSmallUnit(T& z, const T& x, U y) +{ + switch (y) { + case 0: z.clear(); break; + case 1: z = x; break; + case 2: T::add(z, x, x); break; + case 3: { T t; T::add(t, x, x); T::add(z, t, x); break; } + case 4: T::add(z, x, x); T::add(z, z, z); break; + case 5: { T t; T::add(t, x, x); T::add(t, t, t); T::add(z, t, x); break; } + case 6: { T t; T::add(t, x, x); T::add(t, t, x); T::add(z, t, t); break; } + case 7: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, t); T::sub(z, t, x); break; } + case 8: T::add(z, x, x); T::add(z, z, z); T::add(z, z, z); break; + case 9: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, t); T::add(z, t, x); break; } + case 10: { T t; T::add(t, x, x); T::add(t, t, t); T::add(t, t, x); T::add(z, t, t); break; } + default: + return false; + } + return true; +} + +} } // mcl::fp + +#ifdef _MSC_VER + #pragma warning(pop) +#endif diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/vint.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/vint.hpp new file mode 100644 index 000000000..32a51bb64 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/vint.hpp @@ -0,0 +1,1926 @@ +#pragma once +/** + emulate mpz_class +*/ +#include <cybozu/exception.hpp> +#include <cybozu/bit_operation.hpp> +#include <cybozu/xorshift.hpp> +#include <assert.h> +#ifndef CYBOZU_DONT_USE_STRING +#include <iostream> +#endif +#include <mcl/array.hpp> +#include <mcl/util.hpp> +#include <mcl/randgen.hpp> +#include <mcl/conversion.hpp> + +#if defined(__EMSCRIPTEN__) || defined(__wasm__) + #define MCL_VINT_64BIT_PORTABLE + #define MCL_VINT_FIXED_BUFFER +#endif +#ifndef MCL_MAX_BIT_SIZE + #define MCL_MAX_BIT_SIZE 384 +#endif + +#ifndef MCL_SIZEOF_UNIT + #if defined(CYBOZU_OS_BIT) && (CYBOZU_OS_BIT == 32) + #define MCL_SIZEOF_UNIT 4 + #else + #define MCL_SIZEOF_UNIT 8 + #endif +#endif + +namespace mcl { + +namespace vint { + +#if MCL_SIZEOF_UNIT == 8 +typedef uint64_t Unit; +#else +typedef uint32_t Unit; +#endif + +template<class T> +void dump(const T *x, size_t n, const char *msg = "") +{ + const size_t is4byteUnit = sizeof(*x) == 4; + if (msg) printf("%s ", msg); + for (size_t i = 0; i < n; i++) { + if (is4byteUnit) { + printf("%08x", (uint32_t)x[n - 1 - i]); + } else { + printf("%016llx", (unsigned long long)x[n - 1 - i]); + } + } + printf("\n"); +} + +inline uint64_t make64(uint32_t H, uint32_t L) +{ + return ((uint64_t)H << 32) | L; +} + +inline void split64(uint32_t *H, uint32_t *L, uint64_t x) +{ + *H = uint32_t(x >> 32); + *L = uint32_t(x); +} + +/* + [H:L] <= x * y + @return L +*/ +inline uint32_t mulUnit(uint32_t *pH, uint32_t x, uint32_t y) +{ + uint64_t t = uint64_t(x) * y; + uint32_t L; + split64(pH, &L, t); + return L; +} +#if MCL_SIZEOF_UNIT == 8 +inline uint64_t mulUnit(uint64_t *pH, uint64_t x, uint64_t y) +{ +#ifdef MCL_VINT_64BIT_PORTABLE + uint32_t a = uint32_t(x >> 32); + uint32_t b = uint32_t(x); + uint32_t c = uint32_t(y >> 
32); + uint32_t d = uint32_t(y); + + uint64_t ad = uint64_t(d) * a; + uint64_t bd = uint64_t(d) * b; + uint64_t L = uint32_t(bd); + ad += bd >> 32; // [ad:L] + + uint64_t ac = uint64_t(c) * a; + uint64_t bc = uint64_t(c) * b; + uint64_t H = uint32_t(bc); + ac += bc >> 32; // [ac:H] + /* + adL + acH + */ + uint64_t t = (ac << 32) | H; + ac >>= 32; + H = t + ad; + if (H < t) { + ac++; + } + /* + ac:H:L + */ + L |= H << 32; + H = (ac << 32) | uint32_t(H >> 32); + *pH = H; + return L; +#elif defined(_WIN64) && !defined(__INTEL_COMPILER) + return _umul128(x, y, pH); +#else + typedef __attribute__((mode(TI))) unsigned int uint128; + uint128 t = uint128(x) * y; + *pH = uint64_t(t >> 64); + return uint64_t(t); +#endif +} +#endif + +template<class T> +void divNM(T *q, size_t qn, T *r, const T *x, size_t xn, const T *y, size_t yn); + +/* + q = [H:L] / y + r = [H:L] % y + return q +*/ +inline uint32_t divUnit(uint32_t *pr, uint32_t H, uint32_t L, uint32_t y) +{ + uint64_t t = make64(H, L); + uint32_t q = uint32_t(t / y); + *pr = uint32_t(t % y); + return q; +} +#if MCL_SIZEOF_UNIT == 8 +inline uint64_t divUnit(uint64_t *pr, uint64_t H, uint64_t L, uint64_t y) +{ +#if defined(MCL_VINT_64BIT_PORTABLE) + uint32_t px[4] = { uint32_t(L), uint32_t(L >> 32), uint32_t(H), uint32_t(H >> 32) }; + uint32_t py[2] = { uint32_t(y), uint32_t(y >> 32) }; + size_t xn = 4; + size_t yn = 2; + uint32_t q[4]; + uint32_t r[2]; + size_t qn = xn - yn + 1; + divNM(q, qn, r, px, xn, py, yn); + *pr = make64(r[1], r[0]); + return make64(q[1], q[0]); +#elif defined(_MSC_VER) + #error "divUnit for uint64_t is not supported" +#else + typedef __attribute__((mode(TI))) unsigned int uint128; + uint128 t = (uint128(H) << 64) | L; + uint64_t q = uint64_t(t / y); + *pr = uint64_t(t % y); + return q; +#endif +} +#endif + +/* + compare x[] and y[] + @retval positive if x > y + @retval 0 if x == y + @retval negative if x < y +*/ +template<class T> +int compareNM(const T *x, size_t xn, const T *y, size_t yn) +{ + assert(xn > 0 && yn > 0); + if (xn != yn) return xn > yn ? 1 : -1; + for (int i = (int)xn - 1; i >= 0; i--) { + if (x[i] != y[i]) return x[i] > y[i] ? 1 : -1; + } + return 0; +} + +template<class T> +void clearN(T *x, size_t n) +{ + for (size_t i = 0; i < n; i++) x[i] = 0; +} + +template<class T> +void copyN(T *y, const T *x, size_t n) +{ + for (size_t i = 0; i < n; i++) y[i] = x[i]; +} + +/* + z[] = x[n] + y[n] + @note return 1 if having carry + z may be equal to x or y +*/ +template<class T> +T addN(T *z, const T *x, const T *y, size_t n) +{ + T c = 0; + for (size_t i = 0; i < n; i++) { + T xc = x[i] + c; + if (xc < c) { + // x[i] = Unit(-1) and c = 1 + z[i] = y[i]; + } else { + xc += y[i]; + c = y[i] > xc ? 
1 : 0; + z[i] = xc; + } + } + return c; +} + +/* + z[] = x[] + y +*/ +template<class T> +T addu1(T *z, const T *x, size_t n, T y) +{ + assert(n > 0); + T t = x[0] + y; + z[0] = t; + size_t i = 0; + if (t >= y) goto EXIT_0; + i = 1; + for (; i < n; i++) { + t = x[i] + 1; + z[i] = t; + if (t != 0) goto EXIT_0; + } + return 1; +EXIT_0: + i++; + for (; i < n; i++) { + z[i] = x[i]; + } + return 0; +} + +/* + x[] += y +*/ +template<class T> +T addu1(T *x, size_t n, T y) +{ + assert(n > 0); + T t = x[0] + y; + x[0] = t; + size_t i = 0; + if (t >= y) return 0; + i = 1; + for (; i < n; i++) { + t = x[i] + 1; + x[i] = t; + if (t != 0) return 0; + } + return 1; +} +/* + z[zn] = x[xn] + y[yn] + @note zn = max(xn, yn) +*/ +template<class T> +T addNM(T *z, const T *x, size_t xn, const T *y, size_t yn) +{ + if (yn > xn) { + fp::swap_(xn, yn); + fp::swap_(x, y); + } + assert(xn >= yn); + size_t max = xn; + size_t min = yn; + T c = vint::addN(z, x, y, min); + if (max > min) { + c = vint::addu1(z + min, x + min, max - min, c); + } + return c; +} + +/* + z[] = x[n] - y[n] + z may be equal to x or y +*/ +template<class T> +T subN(T *z, const T *x, const T *y, size_t n) +{ + assert(n > 0); + T c = 0; + for (size_t i = 0; i < n; i++) { + T yc = y[i] + c; + if (yc < c) { + // y[i] = T(-1) and c = 1 + z[i] = x[i]; + } else { + c = x[i] < yc ? 1 : 0; + z[i] = x[i] - yc; + } + } + return c; +} + +/* + out[] = x[n] - y +*/ +template<class T> +T subu1(T *z, const T *x, size_t n, T y) +{ + assert(n > 0); +#if 0 + T t = x[0]; + z[0] = t - y; + size_t i = 0; + if (t >= y) goto EXIT_0; + i = 1; + for (; i < n; i++ ){ + t = x[i]; + z[i] = t - 1; + if (t != 0) goto EXIT_0; + } + return 1; +EXIT_0: + i++; + for (; i < n; i++) { + z[i] = x[i]; + } + return 0; +#else + T c = x[0] < y ? 
1 : 0; + z[0] = x[0] - y; + for (size_t i = 1; i < n; i++) { + if (x[i] < c) { + z[i] = T(-1); + } else { + z[i] = x[i] - c; + c = 0; + } + } + return c; +#endif +} + +/* + z[xn] = x[xn] - y[yn] + @note xn >= yn +*/ +template<class T> +T subNM(T *z, const T *x, size_t xn, const T *y, size_t yn) +{ + assert(xn >= yn); + T c = vint::subN(z, x, y, yn); + if (xn > yn) { + c = vint::subu1(z + yn, x + yn, xn - yn, c); + } + return c; +} + +/* + z[0..n) = x[0..n) * y + return z[n] + @note accept z == x +*/ +template<class T> +T mulu1(T *z, const T *x, size_t n, T y) +{ + assert(n > 0); + T H = 0; + for (size_t i = 0; i < n; i++) { + T t = H; + T L = mulUnit(&H, x[i], y); + z[i] = t + L; + if (z[i] < t) { + H++; + } + } + return H; // z[n] +} + +/* + z[xn * yn] = x[xn] * y[ym] +*/ +template<class T> +static inline void mulNM(T *z, const T *x, size_t xn, const T *y, size_t yn) +{ + assert(xn > 0 && yn > 0); + if (yn > xn) { + fp::swap_(yn, xn); + fp::swap_(x, y); + } + assert(xn >= yn); + if (z == x) { + T *p = (T*)CYBOZU_ALLOCA(sizeof(T) * xn); + copyN(p, x, xn); + x = p; + } + if (z == y) { + T *p = (T*)CYBOZU_ALLOCA(sizeof(T) * yn); + copyN(p, y, yn); + y = p; + } + z[xn] = vint::mulu1(&z[0], x, xn, y[0]); + clearN(z + xn + 1, yn - 1); + + T *t2 = (T*)CYBOZU_ALLOCA(sizeof(T) * (xn + 1)); + for (size_t i = 1; i < yn; i++) { + t2[xn] = vint::mulu1(&t2[0], x, xn, y[i]); + vint::addN(&z[i], &z[i], &t2[0], xn + 1); + } +} +/* + out[xn * 2] = x[xn] * x[xn] + QQQ : optimize this +*/ +template<class T> +static inline void sqrN(T *y, const T *x, size_t xn) +{ + mulNM(y, x, xn, x, xn); +} + +/* + q[] = x[] / y + @retval r = x[] % y + accept q == x +*/ +template<class T> +T divu1(T *q, const T *x, size_t n, T y) +{ + T r = 0; + for (int i = (int)n - 1; i >= 0; i--) { + q[i] = divUnit(&r, r, x[i], y); + } + return r; +} +/* + q[] = x[] / y + @retval r = x[] % y +*/ +template<class T> +T modu1(const T *x, size_t n, T y) +{ + T r = 0; + for (int i = (int)n - 1; i >= 0; i--) { + divUnit(&r, r, x[i], y); + } + return r; +} + +/* + y[] = x[] << bit + 0 < bit < sizeof(T) * 8 + accept y == x +*/ +template<class T> +T shlBit(T *y, const T *x, size_t xn, size_t bit) +{ + assert(0 < bit && bit < sizeof(T) * 8); + assert(xn > 0); + size_t rBit = sizeof(T) * 8 - bit; + T keep = x[xn - 1]; + T prev = keep; + for (size_t i = xn - 1; i > 0; i--) { + T t = x[i - 1]; + y[i] = (prev << bit) | (t >> rBit); + prev = t; + } + y[0] = prev << bit; + return keep >> rBit; +} + +/* + y[yn] = x[xn] << bit + yn = xn + (bit + unitBitBit - 1) / unitBitSize + accept y == x +*/ +template<class T> +void shlN(T *y, const T *x, size_t xn, size_t bit) +{ + assert(xn > 0); + const size_t unitBitSize = sizeof(T) * 8; + size_t q = bit / unitBitSize; + size_t r = bit % unitBitSize; + if (r == 0) { + // don't use copyN(y + q, x, xn); if overlaped + for (size_t i = 0; i < xn; i++) { + y[q + xn - 1 - i] = x[xn - 1 - i]; + } + } else { + y[q + xn] = shlBit(y + q, x, xn, r); + } + clearN(y, q); +} + +/* + y[] = x[] >> bit + 0 < bit < sizeof(T) * 8 +*/ +template<class T> +void shrBit(T *y, const T *x, size_t xn, size_t bit) +{ + assert(0 < bit && bit < sizeof(T) * 8); + assert(xn > 0); + size_t rBit = sizeof(T) * 8 - bit; + T prev = x[0]; + for (size_t i = 1; i < xn; i++) { + T t = x[i]; + y[i - 1] = (prev >> bit) | (t << rBit); + prev = t; + } + y[xn - 1] = prev >> bit; +} +/* + y[yn] = x[xn] >> bit + yn = xn - bit / unitBit +*/ +template<class T> +void shrN(T *y, const T *x, size_t xn, size_t bit) +{ + assert(xn > 0); + const size_t unitBitSize = 
sizeof(T) * 8; + size_t q = bit / unitBitSize; + size_t r = bit % unitBitSize; + assert(xn >= q); + if (r == 0) { + copyN(y, x + q, xn - q); + } else { + shrBit(y, x + q, xn - q, r); + } +} + +template<class T> +size_t getRealSize(const T *x, size_t xn) +{ + int i = (int)xn - 1; + for (; i > 0; i--) { + if (x[i]) { + return i + 1; + } + } + return 1; +} + +template<class T> +size_t getBitSize(const T *x, size_t n) +{ + if (n == 1 && x[0] == 0) return 1; + T v = x[n - 1]; + assert(v); + return (n - 1) * sizeof(T) * 8 + 1 + cybozu::bsr<Unit>(v); +} + +/* + q[qn] = x[xn] / y[yn] ; qn == xn - yn + 1 if xn >= yn if q + r[rn] = x[xn] % y[yn] ; rn = yn before getRealSize + allow q == 0 +*/ +template<class T> +void divNM(T *q, size_t qn, T *r, const T *x, size_t xn, const T *y, size_t yn) +{ + assert(xn > 0 && yn > 0); + assert(xn < yn || (q == 0 || qn == xn - yn + 1)); + assert(q != r); + const size_t rn = yn; + xn = getRealSize(x, xn); + yn = getRealSize(y, yn); + if (x == y) { + assert(xn == yn); + clearN(r, rn); + if (q) { + q[0] = 1; + clearN(q + 1, qn - 1); + } + return; + } + if (yn > xn) { + /* + if y > x then q = 0 and r = x + */ + copyN(r, x, xn); + clearN(r + xn, rn - xn); + if (q) clearN(q, qn); + return; + } + if (yn == 1) { + T t; + if (q) { + if (qn > xn) { + clearN(q + xn, qn - xn); + } + t = divu1(q, x, xn, y[0]); + } else { + t = modu1(x, xn, y[0]); + } + r[0] = t; + clearN(r + 1, rn - 1); + return; + } + assert(yn >= 2); + /* + bitwise left shift x and y to adjust MSB of y[yn - 1] = 1 + */ + const size_t shift = sizeof(T) * 8 - 1 - cybozu::bsr(y[yn - 1]); + T *xx = (T*)CYBOZU_ALLOCA(sizeof(T) * (xn + 1)); + const T *yy; + if (shift) { + T v = shlBit(xx, x, xn, shift); + if (v) { + xx[xn] = v; + xn++; + } + T *yBuf = (T*)CYBOZU_ALLOCA(sizeof(T) * yn); + shlBit(yBuf, y, yn ,shift); + yy = yBuf; + } else { + copyN(xx, x, xn); + yy = y; + } + if (q) { + clearN(q, qn); + } + assert((yy[yn - 1] >> (sizeof(T) * 8 - 1)) != 0); + T *tt = (T*)CYBOZU_ALLOCA(sizeof(T) * (yn + 1)); + while (xn > yn) { + size_t d = xn - yn; + T xTop = xx[xn - 1]; + T yTop = yy[yn - 1]; + if (xTop > yTop || (compareNM(xx + d, xn - d, yy, yn) >= 0)) { + vint::subN(xx + d, xx + d, yy, yn); + xn = getRealSize(xx, xn); + if (q) vint::addu1<T>(q + d, qn - d, 1); + continue; + } + if (xTop == 1) { + vint::subNM(xx + d - 1, xx + d - 1, xn - d + 1, yy, yn); + xn = getRealSize(xx, xn); + if (q) vint::addu1<T>(q + d - 1, qn - d + 1, 1); + continue; + } + tt[yn] = vint::mulu1(tt, yy, yn, xTop); + vint::subN(xx + d - 1, xx + d - 1, tt, yn + 1); + xn = getRealSize(xx, xn); + if (q) vint::addu1<T>(q + d - 1, qn - d + 1, xTop); + } + if (xn == yn && compareNM(xx, xn, yy, yn) >= 0) { + subN(xx, xx, yy, yn); + xn = getRealSize(xx, xn); + if (q) vint::addu1<T>(q, qn, 1); + } + if (shift) { + shrBit(r, xx, xn, shift); + } else { + copyN(r, xx, xn); + } + clearN(r + xn, rn - xn); +} + +#ifndef MCL_VINT_FIXED_BUFFER +template<class T> +class Buffer { + size_t allocSize_; + T *ptr_; +public: + typedef T Unit; + Buffer() : allocSize_(0), ptr_(0) {} + ~Buffer() + { + clear(); + } + Buffer(const Buffer& rhs) + : allocSize_(rhs.allocSize_) + , ptr_(0) + { + ptr_ = (T*)malloc(allocSize_ * sizeof(T)); + if (ptr_ == 0) throw cybozu::Exception("Buffer:malloc") << rhs.allocSize_; + memcpy(ptr_, rhs.ptr_, allocSize_ * sizeof(T)); + } + Buffer& operator=(const Buffer& rhs) + { + Buffer t(rhs); + swap(t); + return *this; + } + void swap(Buffer& rhs) +#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 + noexcept +#endif + { + 
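// exchange only the pointer and the capacity; no allocation happens here, hence noexcept on C++11 +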
fp::swap_(allocSize_, rhs.allocSize_); + fp::swap_(ptr_, rhs.ptr_); + } + void clear() + { + allocSize_ = 0; + free(ptr_); + ptr_ = 0; + } + + /* + @note extended buffer may be not cleared + */ + void alloc(bool *pb, size_t n) + { + if (n > allocSize_) { + T *p = (T*)malloc(n * sizeof(T)); + if (p == 0) { + *pb = false; + return; + } + copyN(p, ptr_, allocSize_); + free(ptr_); + ptr_ = p; + allocSize_ = n; + } + *pb = true; + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void alloc(size_t n) + { + bool b; + alloc(&b, n); + if (!b) throw cybozu::Exception("Buffer:alloc"); + } +#endif + /* + *this = rhs + rhs may be destroyed + */ + const T& operator[](size_t n) const { return ptr_[n]; } + T& operator[](size_t n) { return ptr_[n]; } +}; +#endif + +template<class T, size_t BitLen> +class FixedBuffer { + enum { + N = (BitLen + sizeof(T) * 8 - 1) / (sizeof(T) * 8) + }; + size_t size_; + T v_[N]; +public: + typedef T Unit; + FixedBuffer() + : size_(0) + { + } + FixedBuffer(const FixedBuffer& rhs) + { + operator=(rhs); + } + FixedBuffer& operator=(const FixedBuffer& rhs) + { + size_ = rhs.size_; + for (size_t i = 0; i < size_; i++) { + v_[i] = rhs.v_[i]; + } + return *this; + } + void clear() { size_ = 0; } + void alloc(bool *pb, size_t n) + { + if (n > N) { + *pb = false; + return; + } + size_ = n; + *pb = true; + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void alloc(size_t n) + { + bool b; + alloc(&b, n); + if (!b) throw cybozu::Exception("FixedBuffer:alloc"); + } +#endif + void swap(FixedBuffer& rhs) + { + FixedBuffer *p1 = this; + FixedBuffer *p2 = &rhs; + if (p1->size_ < p2->size_) { + fp::swap_(p1, p2); + } + assert(p1->size_ >= p2->size_); + for (size_t i = 0; i < p2->size_; i++) { + fp::swap_(p1->v_[i], p2->v_[i]); + } + for (size_t i = p2->size_; i < p1->size_; i++) { + p2->v_[i] = p1->v_[i]; + } + fp::swap_(p1->size_, p2->size_); + } + // to avoid warning of gcc + void verify(size_t n) const + { + assert(n <= N); + (void)n; + } + const T& operator[](size_t n) const { verify(n); return v_[n]; } + T& operator[](size_t n) { verify(n); return v_[n]; } +}; + +#if MCL_SIZEOF_UNIT == 8 +/* + M = 1 << 256 + a = M mod p = (1 << 32) + 0x3d1 + [H:L] mod p = H * a + L + + if H = L = M - 1, t = H * a + L = aM + (M - a - 1) + H' = a, L' = M - a - 1 + t' = H' * a + L' = M + (a^2 - a - 1) + H'' = 1, L'' = a^2 - a - 1 + t'' = H'' * a + L'' = a^2 - 1 +*/ +inline void mcl_fpDbl_mod_SECP256K1(Unit *z, const Unit *x, const Unit *p) +{ + const Unit a = (uint64_t(1) << 32) + 0x3d1; + Unit buf[5]; + buf[4] = mulu1(buf, x + 4, 4, a); // H * a + buf[4] += addN(buf, buf, x, 4); // t = H * a + L + Unit x2[2]; + x2[0] = mulUnit(&x2[1], buf[4], a); + Unit x3 = addN(buf, buf, x2, 2); + if (x3) { + x3 = addu1(buf + 2, buf + 2, 2, Unit(1)); // t' = H' * a + L' + if (x3) { + x3 = addu1(buf, buf, 4, a); + assert(x3 == 0); + } + } + if (fp::isGreaterOrEqualArray(buf, p, 4)) { + subN(z, buf, p, 4); + } else { + fp::copyArray(z, buf, 4); + } +} + +inline void mcl_fp_mul_SECP256K1(Unit *z, const Unit *x, const Unit *y, const Unit *p) +{ + Unit xy[8]; + mulNM(xy, x, 4, y, 4); + mcl_fpDbl_mod_SECP256K1(z, xy, p); +} +inline void mcl_fp_sqr_SECP256K1(Unit *y, const Unit *x, const Unit *p) +{ + Unit xx[8]; + sqrN(xx, x, 4); + mcl_fpDbl_mod_SECP256K1(y, xx, p); +} +#endif + +} // vint + +/** + signed integer with variable length +*/ +template<class _Buffer> +class VintT { +public: + typedef _Buffer Buffer; + typedef typename Buffer::Unit Unit; + static const size_t unitBitSize = sizeof(Unit) * 8; + static const int invalidVar = -2147483647 
- 1; // abs(invalidVar) is not defined +private: + Buffer buf_; + size_t size_; + bool isNeg_; + void trim(size_t n) + { + assert(n > 0); + int i = (int)n - 1; + for (; i > 0; i--) { + if (buf_[i]) { + size_ = i + 1; + return; + } + } + size_ = 1; + // zero + if (buf_[0] == 0) { + isNeg_ = false; + } + } + static int ucompare(const Buffer& x, size_t xn, const Buffer& y, size_t yn) + { + return vint::compareNM(&x[0], xn, &y[0], yn); + } + static void uadd(VintT& z, const Buffer& x, size_t xn, const Buffer& y, size_t yn) + { + size_t zn = fp::max_(xn, yn) + 1; + bool b; + z.buf_.alloc(&b, zn); + assert(b); (void)b; + z.buf_[zn - 1] = vint::addNM(&z.buf_[0], &x[0], xn, &y[0], yn); + z.trim(zn); + } + static void uadd1(VintT& z, const Buffer& x, size_t xn, Unit y) + { + size_t zn = xn + 1; + bool b; + z.buf_.alloc(&b, zn); + assert(b); (void)b; + z.buf_[zn - 1] = vint::addu1(&z.buf_[0], &x[0], xn, y); + z.trim(zn); + } + static void usub1(VintT& z, const Buffer& x, size_t xn, Unit y) + { + size_t zn = xn; + bool b; + z.buf_.alloc(&b, zn); + assert(b); (void)b; + Unit c = vint::subu1(&z.buf_[0], &x[0], xn, y); + (void)c; + assert(!c); + z.trim(zn); + } + static void usub(VintT& z, const Buffer& x, size_t xn, const Buffer& y, size_t yn) + { + assert(xn >= yn); + bool b; + z.buf_.alloc(&b, xn); + assert(b); (void)b; + Unit c = vint::subN(&z.buf_[0], &x[0], &y[0], yn); + if (xn > yn) { + c = vint::subu1(&z.buf_[yn], &x[yn], xn - yn, c); + } + assert(!c); + z.trim(xn); + } + static void _add(VintT& z, const VintT& x, bool xNeg, const VintT& y, bool yNeg) + { + if ((xNeg ^ yNeg) == 0) { + // same sign + uadd(z, x.buf_, x.size(), y.buf_, y.size()); + z.isNeg_ = xNeg; + return; + } + int r = ucompare(x.buf_, x.size(), y.buf_, y.size()); + if (r >= 0) { + usub(z, x.buf_, x.size(), y.buf_, y.size()); + z.isNeg_ = xNeg; + } else { + usub(z, y.buf_, y.size(), x.buf_, x.size()); + z.isNeg_ = yNeg; + } + } + static void _adds1(VintT& z, const VintT& x, int y, bool yNeg) + { + assert(y >= 0); + if ((x.isNeg_ ^ yNeg) == 0) { + // same sign + uadd1(z, x.buf_, x.size(), y); + z.isNeg_ = yNeg; + return; + } + if (x.size() > 1 || x.buf_[0] >= (Unit)y) { + usub1(z, x.buf_, x.size(), y); + z.isNeg_ = x.isNeg_; + } else { + z = y - x.buf_[0]; + z.isNeg_ = yNeg; + } + } + static void _addu1(VintT& z, const VintT& x, Unit y, bool yNeg) + { + if ((x.isNeg_ ^ yNeg) == 0) { + // same sign + uadd1(z, x.buf_, x.size(), y); + z.isNeg_ = yNeg; + return; + } + if (x.size() > 1 || x.buf_[0] >= y) { + usub1(z, x.buf_, x.size(), y); + z.isNeg_ = x.isNeg_; + } else { + z = y - x.buf_[0]; + z.isNeg_ = yNeg; + } + } + /** + @param q [out] x / y if q != 0 + @param r [out] x % y + */ + static void udiv(VintT* q, VintT& r, const Buffer& x, size_t xn, const Buffer& y, size_t yn) + { + assert(q != &r); + if (xn < yn) { + r.buf_ = x; + r.trim(xn); + if (q) q->clear(); + return; + } + size_t qn = xn - yn + 1; + bool b; + if (q) { + q->buf_.alloc(&b, qn); + assert(b); (void)b; + } + r.buf_.alloc(&b, yn); + assert(b); (void)b; + vint::divNM(q ? 
&q->buf_[0] : 0, qn, &r.buf_[0], &x[0], xn, &y[0], yn); + if (q) { + q->trim(qn); + } + r.trim(yn); + } + /* + @param x [inout] x <- d + @retval s for x = 2^s d where d is odd + */ + static uint32_t countTrailingZero(VintT& x) + { + uint32_t s = 0; + while (x.isEven()) { + x >>= 1; + s++; + } + return s; + } + struct MulMod { + const VintT *pm; + void operator()(VintT& z, const VintT& x, const VintT& y) const + { + VintT::mul(z, x, y); + z %= *pm; + } + }; + struct SqrMod { + const VintT *pm; + void operator()(VintT& y, const VintT& x) const + { + VintT::sqr(y, x); + y %= *pm; + } + }; +public: + VintT(int x = 0) + : size_(0) + { + *this = x; + } + VintT(Unit x) + : size_(0) + { + *this = x; + } + VintT(const VintT& rhs) + : buf_(rhs.buf_) + , size_(rhs.size_) + , isNeg_(rhs.isNeg_) + { + } + VintT& operator=(int x) + { + assert(x != invalidVar); + isNeg_ = x < 0; + bool b; + buf_.alloc(&b, 1); + assert(b); (void)b; + buf_[0] = fp::abs_(x); + size_ = 1; + return *this; + } + VintT& operator=(Unit x) + { + isNeg_ = false; + bool b; + buf_.alloc(&b, 1); + assert(b); (void)b; + buf_[0] = x; + size_ = 1; + return *this; + } + VintT& operator=(const VintT& rhs) + { + buf_ = rhs.buf_; + size_ = rhs.size_; + isNeg_ = rhs.isNeg_; + return *this; + } +#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 + VintT(VintT&& rhs) + : buf_(rhs.buf_) + , size_(rhs.size_) + , isNeg_(rhs.isNeg_) + { + } + VintT& operator=(VintT&& rhs) + { + buf_ = std::move(rhs.buf_); + size_ = rhs.size_; + isNeg_ = rhs.isNeg_; + return *this; + } +#endif + void swap(VintT& rhs) +#if CYBOZU_CPP_VERSION >= CYBOZU_CPP_VERSION_CPP11 + noexcept +#endif + { + fp::swap_(buf_, rhs.buf_); + fp::swap_(size_, rhs.size_); + fp::swap_(isNeg_, rhs.isNeg_); + } + void dump(const char *msg = "") const + { + vint::dump(&buf_[0], size_, msg); + } + /* + set positive value + @note assume little endian system + */ + template<class S> + void setArray(bool *pb, const S *x, size_t size) + { + isNeg_ = false; + if (size == 0) { + clear(); + *pb = true; + return; + } + size_t unitSize = (sizeof(S) * size + sizeof(Unit) - 1) / sizeof(Unit); + buf_.alloc(pb, unitSize); + if (!*pb) return; + buf_[unitSize - 1] = 0; + memcpy(&buf_[0], x, sizeof(S) * size); + trim(unitSize); + } + /* + set [0, max) randomly + */ + void setRand(bool *pb, const VintT& max, fp::RandGen rg = fp::RandGen()) + { + assert(max > 0); + if (rg.isZero()) rg = fp::RandGen::get(); + size_t n = max.size(); + buf_.alloc(pb, n); + if (!*pb) return; + rg.read(&buf_[0], n * sizeof(buf_[0])); + trim(n); + *this %= max; + } + /* + get abs value + buf_[0, size) = x + buf_[size, maxSize) with zero + @note assume little endian system + */ + void getArray(bool *pb, Unit *x, size_t maxSize) const + { + size_t n = size(); + if (n > maxSize) { + *pb = false; + return; + } + vint::copyN(x, &buf_[0], n); + vint::clearN(x + n, maxSize - n); + *pb = true; + } + void clear() { *this = 0; } + template<class OutputStream> + void save(bool *pb, OutputStream& os, int base = 10) const + { + if (isNeg_) cybozu::writeChar(pb, os, '-'); + char buf[1024]; + size_t n = mcl::fp::arrayToStr(buf, sizeof(buf), &buf_[0], size_, base, false); + if (n == 0) { + *pb = false; + return; + } + cybozu::write(pb, os, buf + sizeof(buf) - n, n); + } + /* + set buf with string terminated by '\0' + return strlen(buf) if success else 0 + */ + size_t getStr(char *buf, size_t bufSize, int base = 10) const + { + cybozu::MemoryOutputStream os(buf, bufSize); + bool b; + save(&b, os, base); + const size_t n = os.getPos(); + if (!b || n 
== bufSize) return 0; + buf[n] = '\0'; + return n; + } + /* + return bitSize(abs(*this)) + @note return 1 if zero + */ + size_t getBitSize() const + { + if (isZero()) return 1; + size_t n = size(); + Unit v = buf_[n - 1]; + assert(v); + return (n - 1) * sizeof(Unit) * 8 + 1 + cybozu::bsr<Unit>(v); + } + // ignore sign + bool testBit(size_t i) const + { + size_t q = i / unitBitSize; + size_t r = i % unitBitSize; + assert(q <= size()); + Unit mask = Unit(1) << r; + return (buf_[q] & mask) != 0; + } + void setBit(size_t i, bool v = true) + { + size_t q = i / unitBitSize; + size_t r = i % unitBitSize; + assert(q <= size()); + bool b; + buf_.alloc(&b, q + 1); + assert(b); (void)b; + Unit mask = Unit(1) << r; + if (v) { + buf_[q] |= mask; + } else { + buf_[q] &= ~mask; + trim(q + 1); + } + } + /* + @param str [in] number string + @note "0x..." => base = 16 + "0b..." => base = 2 + otherwise => base = 10 + */ + void setStr(bool *pb, const char *str, int base = 0) + { + const size_t maxN = MCL_MAX_BIT_SIZE / (sizeof(MCL_SIZEOF_UNIT) * 8); + buf_.alloc(pb, maxN); + if (!*pb) return; + *pb = false; + isNeg_ = false; + size_t len = strlen(str); + size_t n = fp::strToArray(&isNeg_, &buf_[0], maxN, str, len, base); + if (n == 0) return; + trim(n); + *pb = true; + } + static int compare(const VintT& x, const VintT& y) + { + if (x.isNeg_ ^ y.isNeg_) { + if (x.isZero() && y.isZero()) return 0; + return x.isNeg_ ? -1 : 1; + } else { + // same sign + int c = ucompare(x.buf_, x.size(), y.buf_, y.size()); + if (x.isNeg_) { + return -c; + } + return c; + } + } + static int compares1(const VintT& x, int y) + { + assert(y != invalidVar); + if (x.isNeg_ ^ (y < 0)) { + if (x.isZero() && y == 0) return 0; + return x.isNeg_ ? -1 : 1; + } else { + // same sign + Unit y0 = fp::abs_(y); + int c = vint::compareNM(&x.buf_[0], x.size(), &y0, 1); + if (x.isNeg_) { + return -c; + } + return c; + } + } + static int compareu1(const VintT& x, uint32_t y) + { + if (x.isNeg_) return -1; + if (x.size() > 1) return 1; + Unit x0 = x.buf_[0]; + return x0 > y ? 1 : x0 == y ? 
0 : -1; + } + size_t size() const { return size_; } + bool isZero() const { return size() == 1 && buf_[0] == 0; } + bool isNegative() const { return !isZero() && isNeg_; } + uint32_t getLow32bit() const { return (uint32_t)buf_[0]; } + bool isOdd() const { return (buf_[0] & 1) == 1; } + bool isEven() const { return !isOdd(); } + const Unit *getUnit() const { return &buf_[0]; } + size_t getUnitSize() const { return size_; } + static void add(VintT& z, const VintT& x, const VintT& y) + { + _add(z, x, x.isNeg_, y, y.isNeg_); + } + static void sub(VintT& z, const VintT& x, const VintT& y) + { + _add(z, x, x.isNeg_, y, !y.isNeg_); + } + static void mul(VintT& z, const VintT& x, const VintT& y) + { + const size_t xn = x.size(); + const size_t yn = y.size(); + size_t zn = xn + yn; + bool b; + z.buf_.alloc(&b, zn); + assert(b); (void)b; + vint::mulNM(&z.buf_[0], &x.buf_[0], xn, &y.buf_[0], yn); + z.isNeg_ = x.isNeg_ ^ y.isNeg_; + z.trim(zn); + } + static void sqr(VintT& y, const VintT& x) + { + mul(y, x, x); + } + static void addu1(VintT& z, const VintT& x, Unit y) + { + _addu1(z, x, y, false); + } + static void subu1(VintT& z, const VintT& x, Unit y) + { + _addu1(z, x, y, true); + } + static void mulu1(VintT& z, const VintT& x, Unit y) + { + size_t xn = x.size(); + size_t zn = xn + 1; + bool b; + z.buf_.alloc(&b, zn); + assert(b); (void)b; + z.buf_[zn - 1] = vint::mulu1(&z.buf_[0], &x.buf_[0], xn, y); + z.isNeg_ = x.isNeg_; + z.trim(zn); + } + static void divu1(VintT& q, const VintT& x, Unit y) + { + udivModu1(&q, x, y); + } + static void modu1(VintT& r, const VintT& x, Unit y) + { + bool xNeg = x.isNeg_; + r = divModu1(0, x, y); + r.isNeg_ = xNeg; + } + static void adds1(VintT& z, const VintT& x, int y) + { + assert(y != invalidVar); + _adds1(z, x, fp::abs_(y), y < 0); + } + static void subs1(VintT& z, const VintT& x, int y) + { + assert(y != invalidVar); + _adds1(z, x, fp::abs_(y), !(y < 0)); + } + static void muls1(VintT& z, const VintT& x, int y) + { + assert(y != invalidVar); + mulu1(z, x, fp::abs_(y)); + z.isNeg_ ^= (y < 0); + } + /* + @param q [out] q = x / y if q is not zero + @param x [in] + @param y [in] must be not zero + return x % y + */ + static int divMods1(VintT *q, const VintT& x, int y) + { + assert(y != invalidVar); + bool xNeg = x.isNeg_; + bool yNeg = y < 0; + Unit absY = fp::abs_(y); + size_t xn = x.size(); + int r; + if (q) { + q->isNeg_ = xNeg ^ yNeg; + bool b; + q->buf_.alloc(&b, xn); + assert(b); (void)b; + r = (int)vint::divu1(&q->buf_[0], &x.buf_[0], xn, absY); + q->trim(xn); + } else { + r = (int)vint::modu1(&x.buf_[0], xn, absY); + } + return xNeg ? -r : r; + } + /* + like C + 13 / 5 = 2 ... 3 + 13 / -5 = -2 ... 3 + -13 / 5 = -2 ... -3 + -13 / -5 = 2 ... 
-3 + */ + static void divMod(VintT *q, VintT& r, const VintT& x, const VintT& y) + { + bool qsign = x.isNeg_ ^ y.isNeg_; + udiv(q, r, x.buf_, x.size(), y.buf_, y.size()); + r.isNeg_ = x.isNeg_; + if (q) q->isNeg_ = qsign; + } + static void div(VintT& q, const VintT& x, const VintT& y) + { + VintT r; + divMod(&q, r, x, y); + } + static void mod(VintT& r, const VintT& x, const VintT& y) + { + divMod(0, r, x, y); + } + static void divs1(VintT& q, const VintT& x, int y) + { + divMods1(&q, x, y); + } + static void mods1(VintT& r, const VintT& x, int y) + { + bool xNeg = x.isNeg_; + r = divMods1(0, x, y); + r.isNeg_ = xNeg; + } + static Unit udivModu1(VintT *q, const VintT& x, Unit y) + { + assert(!x.isNeg_); + size_t xn = x.size(); + if (q) { + bool b; + q->buf_.alloc(&b, xn); + assert(b); (void)b; + } + Unit r = vint::divu1(q ? &q->buf_[0] : 0, &x.buf_[0], xn, y); + if (q) { + q->trim(xn); + q->isNeg_ = false; + } + return r; + } + /* + like Python + 13 / 5 = 2 ... 3 + 13 / -5 = -3 ... -2 + -13 / 5 = -3 ... 2 + -13 / -5 = 2 ... -3 + */ + static void quotRem(VintT *q, VintT& r, const VintT& x, const VintT& y) + { + VintT yy = y; + bool qsign = x.isNeg_ ^ y.isNeg_; + udiv(q, r, x.buf_, x.size(), y.buf_, y.size()); + r.isNeg_ = y.isNeg_; + if (q) q->isNeg_ = qsign; + if (!r.isZero() && qsign) { + if (q) { + uadd1(*q, q->buf_, q->size(), 1); + } + usub(r, yy.buf_, yy.size(), r.buf_, r.size()); + } + } + template<class InputStream> + void load(bool *pb, InputStream& is, int ioMode) + { + *pb = false; + char buf[1024]; + size_t n = fp::local::loadWord(buf, sizeof(buf), is); + if (n == 0) return; + const size_t maxN = 384 / (sizeof(MCL_SIZEOF_UNIT) * 8); + buf_.alloc(pb, maxN); + if (!*pb) return; + isNeg_ = false; + n = fp::strToArray(&isNeg_, &buf_[0], maxN, buf, n, ioMode); + if (n == 0) return; + trim(n); + *pb = true; + } + // logical left shift (copy sign) + static void shl(VintT& y, const VintT& x, size_t shiftBit) + { + size_t xn = x.size(); + size_t yn = xn + (shiftBit + unitBitSize - 1) / unitBitSize; + bool b; + y.buf_.alloc(&b, yn); + assert(b); (void)b; + vint::shlN(&y.buf_[0], &x.buf_[0], xn, shiftBit); + y.isNeg_ = x.isNeg_; + y.trim(yn); + } + // logical right shift (copy sign) + static void shr(VintT& y, const VintT& x, size_t shiftBit) + { + size_t xn = x.size(); + if (xn * unitBitSize <= shiftBit) { + y.clear(); + return; + } + size_t yn = xn - shiftBit / unitBitSize; + bool b; + y.buf_.alloc(&b, yn); + assert(b); (void)b; + vint::shrN(&y.buf_[0], &x.buf_[0], xn, shiftBit); + y.isNeg_ = x.isNeg_; + y.trim(yn); + } + static void neg(VintT& y, const VintT& x) + { + if (&y != &x) { y = x; } + y.isNeg_ = !x.isNeg_; + } + static void abs(VintT& y, const VintT& x) + { + if (&y != &x) { y = x; } + y.isNeg_ = false; + } + static VintT abs(const VintT& x) + { + VintT y = x; + abs(y, x); + return y; + } + // accept only non-negative value + static void orBit(VintT& z, const VintT& x, const VintT& y) + { + assert(!x.isNeg_ && !y.isNeg_); + const VintT *px = &x, *py = &y; + if (x.size() < y.size()) { + fp::swap_(px, py); + } + size_t xn = px->size(); + size_t yn = py->size(); + assert(xn >= yn); + bool b; + z.buf_.alloc(&b, xn); + assert(b); (void)b; + for (size_t i = 0; i < yn; i++) { + z.buf_[i] = x.buf_[i] | y.buf_[i]; + } + vint::copyN(&z.buf_[0] + yn, &px->buf_[0] + yn, xn - yn); + z.trim(xn); + } + static void andBit(VintT& z, const VintT& x, const VintT& y) + { + assert(!x.isNeg_ && !y.isNeg_); + const VintT *px = &x, *py = &y; + if (x.size() < y.size()) { + fp::swap_(px, py); + } + 
size_t yn = py->size(); + assert(px->size() >= yn); + bool b; + z.buf_.alloc(&b, yn); + assert(b); (void)b; + for (size_t i = 0; i < yn; i++) { + z.buf_[i] = x.buf_[i] & y.buf_[i]; + } + z.trim(yn); + } + static void orBitu1(VintT& z, const VintT& x, Unit y) + { + assert(!x.isNeg_); + z = x; + z.buf_[0] |= y; + } + static void andBitu1(VintT& z, const VintT& x, Unit y) + { + assert(!x.isNeg_); + bool b; + z.buf_.alloc(&b, 1); + assert(b); (void)b; + z.buf_[0] = x.buf_[0] & y; + z.size_ = 1; + z.isNeg_ = false; + } + /* + REMARK y >= 0; + */ + static void pow(VintT& z, const VintT& x, const VintT& y) + { + assert(!y.isNeg_); + const VintT xx = x; + z = 1; + mcl::fp::powGeneric(z, xx, &y.buf_[0], y.size(), mul, sqr, (void (*)(VintT&, const VintT&))0); + } + /* + REMARK y >= 0; + */ + static void pow(VintT& z, const VintT& x, int64_t y) + { + assert(y >= 0); + const VintT xx = x; + z = 1; +#if MCL_SIZEOF_UNIT == 8 + Unit ua = fp::abs_(y); + mcl::fp::powGeneric(z, xx, &ua, 1, mul, sqr, (void (*)(VintT&, const VintT&))0); +#else + uint64_t ua = fp::abs_(y); + Unit u[2] = { uint32_t(ua), uint32_t(ua >> 32) }; + size_t un = u[1] ? 2 : 1; + mcl::fp::powGeneric(z, xx, u, un, mul, sqr, (void (*)(VintT&, const VintT&))0); +#endif + } + /* + z = x ^ y mod m + REMARK y >= 0; + */ + static void powMod(VintT& z, const VintT& x, const VintT& y, const VintT& m) + { + assert(!y.isNeg_); + VintT zz; + MulMod mulMod; + SqrMod sqrMod; + mulMod.pm = &m; + sqrMod.pm = &m; + zz = 1; + mcl::fp::powGeneric(zz, x, &y.buf_[0], y.size(), mulMod, sqrMod, (void (*)(VintT&, const VintT&))0); + z.swap(zz); + } + /* + inverse mod + y = 1/x mod m + REMARK x != 0 and m != 0; + */ + static void invMod(VintT& y, const VintT& x, const VintT& m) + { + assert(!x.isZero() && !m.isZero()); + if (x == 1) { + y = 1; + return; + } + VintT a = 1; + VintT t; + VintT q; + divMod(&q, t, m, x); + VintT s = x; + VintT b = -q; + + for (;;) { + divMod(&q, s, s, t); + if (s.isZero()) { + if (b.isNeg_) { + b += m; + } + y = b; + return; + } + a -= b * q; + + divMod(&q, t, t, s); + if (t.isZero()) { + if (a.isNeg_) { + a += m; + } + y = a; + return; + } + b -= a * q; + } + } + /* + Miller-Rabin + */ + static bool isPrime(bool *pb, const VintT& n, int tryNum = 32) + { + *pb = true; + if (n <= 1) return false; + if (n == 2 || n == 3) return true; + if (n.isEven()) return false; + cybozu::XorShift rg; + const VintT nm1 = n - 1; + VintT d = nm1; + uint32_t r = countTrailingZero(d); + // n - 1 = 2^r d + VintT a, x; + for (int i = 0; i < tryNum; i++) { + a.setRand(pb, n - 3, rg); + if (!*pb) return false; + a += 2; // a in [2, n - 2] + powMod(x, a, d, n); + if (x == 1 || x == nm1) { + continue; + } + for (uint32_t j = 1; j < r; j++) { + sqr(x, x); + x %= n; + if (x == 1) return false; + if (x == nm1) goto NEXT_LOOP; + } + return false; + NEXT_LOOP:; + } + return true; + } + bool isPrime(bool *pb, int tryNum = 32) const + { + return isPrime(pb, *this, tryNum); + } + static void gcd(VintT& z, VintT x, VintT y) + { + VintT t; + for (;;) { + if (y.isZero()) { + z = x; + return; + } + t = x; + x = y; + mod(y, t, y); + } + } + static VintT gcd(const VintT& x, const VintT& y) + { + VintT z; + gcd(z, x, y); + return z; + } + static void lcm(VintT& z, const VintT& x, const VintT& y) + { + VintT c; + gcd(c, x, y); + div(c, x, c); + mul(z, c, y); + } + static VintT lcm(const VintT& x, const VintT& y) + { + VintT z; + lcm(z, x, y); + return z; + } + /* + 1 if m is quadratic residue modulo n (i.e., there exists an x s.t. 
x^2 = m mod n) + 0 if m = 0 mod n + -1 otherwise + @note return legendre_symbol(m, p) for m and odd prime p + */ + static int jacobi(VintT m, VintT n) + { + assert(n.isOdd()); + if (n == 1) return 1; + if (m < 0 || m > n) { + quotRem(0, m, m, n); // m = m mod n + } + if (m.isZero()) return 0; + if (m == 1) return 1; + if (gcd(m, n) != 1) return 0; + + int j = 1; + VintT t; + goto START; + while (m != 1) { + if ((m.getLow32bit() % 4) == 3 && (n.getLow32bit() % 4) == 3) { + j = -j; + } + mod(t, n, m); + n = m; + m = t; + START: + int s = countTrailingZero(m); + uint32_t nmod8 = n.getLow32bit() % 8; + if ((s % 2) && (nmod8 == 3 || nmod8 == 5)) { + j = -j; + } + } + return j; + } +#ifndef CYBOZU_DONT_USE_STRING + explicit VintT(const std::string& str) + : size_(0) + { + setStr(str); + } + void getStr(std::string& s, int base = 10) const + { + s.clear(); + cybozu::StringOutputStream os(s); + save(os, base); + } + std::string getStr(int base = 10) const + { + std::string s; + getStr(s, base); + return s; + } + inline friend std::ostream& operator<<(std::ostream& os, const VintT& x) + { + return os << x.getStr(os.flags() & std::ios_base::hex ? 16 : 10); + } + inline friend std::istream& operator>>(std::istream& is, VintT& x) + { + x.load(is); + return is; + } +#endif +#ifndef CYBOZU_DONT_USE_EXCEPTION + void setStr(const std::string& str, int base = 0) + { + bool b; + setStr(&b, str.c_str(), base); + if (!b) throw cybozu::Exception("Vint:setStr") << str; + } + void setRand(const VintT& max, fp::RandGen rg = fp::RandGen()) + { + bool b; + setRand(&b, max, rg); + if (!b) throw cybozu::Exception("Vint:setRand"); + } + void getArray(Unit *x, size_t maxSize) const + { + bool b; + getArray(&b, x, maxSize); + if (!b) throw cybozu::Exception("Vint:getArray"); + } + template<class InputStream> + void load(InputStream& is, int ioMode = 0) + { + bool b; + load(&b, is, ioMode); + if (!b) throw cybozu::Exception("Vint:load"); + } + template<class OutputStream> + void save(OutputStream& os, int base = 10) const + { + bool b; + save(&b, os, base); + if (!b) throw cybozu::Exception("Vint:save"); + } + static bool isPrime(const VintT& n, int tryNum = 32) + { + bool b; + bool ret = isPrime(&b, n, tryNum); + if (!b) throw cybozu::Exception("Vint:isPrime"); + return ret; + } + bool isPrime(int tryNum = 32) const + { + bool b; + bool ret = isPrime(&b, *this, tryNum); + if (!b) throw cybozu::Exception("Vint:isPrime"); + return ret; + } + template<class S> + void setArray(const S *x, size_t size) + { + bool b; + setArray(&b, x, size); + if (!b) throw cybozu::Exception("Vint:setArray"); + } +#endif + VintT& operator++() { adds1(*this, *this, 1); return *this; } + VintT& operator--() { subs1(*this, *this, 1); return *this; } + VintT operator++(int) { VintT c = *this; adds1(*this, *this, 1); return c; } + VintT operator--(int) { VintT c = *this; subs1(*this, *this, 1); return c; } + friend bool operator<(const VintT& x, const VintT& y) { return compare(x, y) < 0; } + friend bool operator>=(const VintT& x, const VintT& y) { return !operator<(x, y); } + friend bool operator>(const VintT& x, const VintT& y) { return compare(x, y) > 0; } + friend bool operator<=(const VintT& x, const VintT& y) { return !operator>(x, y); } + friend bool operator==(const VintT& x, const VintT& y) { return compare(x, y) == 0; } + friend bool operator!=(const VintT& x, const VintT& y) { return !operator==(x, y); } + + friend bool operator<(const VintT& x, int y) { return compares1(x, y) < 0; } + friend bool operator>=(const VintT& x, int y) { 
return !operator<(x, y); } + friend bool operator>(const VintT& x, int y) { return compares1(x, y) > 0; } + friend bool operator<=(const VintT& x, int y) { return !operator>(x, y); } + friend bool operator==(const VintT& x, int y) { return compares1(x, y) == 0; } + friend bool operator!=(const VintT& x, int y) { return !operator==(x, y); } + + friend bool operator<(const VintT& x, uint32_t y) { return compareu1(x, y) < 0; } + friend bool operator>=(const VintT& x, uint32_t y) { return !operator<(x, y); } + friend bool operator>(const VintT& x, uint32_t y) { return compareu1(x, y) > 0; } + friend bool operator<=(const VintT& x, uint32_t y) { return !operator>(x, y); } + friend bool operator==(const VintT& x, uint32_t y) { return compareu1(x, y) == 0; } + friend bool operator!=(const VintT& x, uint32_t y) { return !operator==(x, y); } + + VintT& operator+=(const VintT& rhs) { add(*this, *this, rhs); return *this; } + VintT& operator-=(const VintT& rhs) { sub(*this, *this, rhs); return *this; } + VintT& operator*=(const VintT& rhs) { mul(*this, *this, rhs); return *this; } + VintT& operator/=(const VintT& rhs) { div(*this, *this, rhs); return *this; } + VintT& operator%=(const VintT& rhs) { mod(*this, *this, rhs); return *this; } + VintT& operator&=(const VintT& rhs) { andBit(*this, *this, rhs); return *this; } + VintT& operator|=(const VintT& rhs) { orBit(*this, *this, rhs); return *this; } + + VintT& operator+=(int rhs) { adds1(*this, *this, rhs); return *this; } + VintT& operator-=(int rhs) { subs1(*this, *this, rhs); return *this; } + VintT& operator*=(int rhs) { muls1(*this, *this, rhs); return *this; } + VintT& operator/=(int rhs) { divs1(*this, *this, rhs); return *this; } + VintT& operator%=(int rhs) { mods1(*this, *this, rhs); return *this; } + VintT& operator+=(Unit rhs) { addu1(*this, *this, rhs); return *this; } + VintT& operator-=(Unit rhs) { subu1(*this, *this, rhs); return *this; } + VintT& operator*=(Unit rhs) { mulu1(*this, *this, rhs); return *this; } + VintT& operator/=(Unit rhs) { divu1(*this, *this, rhs); return *this; } + VintT& operator%=(Unit rhs) { modu1(*this, *this, rhs); return *this; } + + VintT& operator&=(Unit rhs) { andBitu1(*this, *this, rhs); return *this; } + VintT& operator|=(Unit rhs) { orBitu1(*this, *this, rhs); return *this; } + + friend VintT operator+(const VintT& a, const VintT& b) { VintT c; add(c, a, b); return c; } + friend VintT operator-(const VintT& a, const VintT& b) { VintT c; sub(c, a, b); return c; } + friend VintT operator*(const VintT& a, const VintT& b) { VintT c; mul(c, a, b); return c; } + friend VintT operator/(const VintT& a, const VintT& b) { VintT c; div(c, a, b); return c; } + friend VintT operator%(const VintT& a, const VintT& b) { VintT c; mod(c, a, b); return c; } + friend VintT operator&(const VintT& a, const VintT& b) { VintT c; andBit(c, a, b); return c; } + friend VintT operator|(const VintT& a, const VintT& b) { VintT c; orBit(c, a, b); return c; } + + friend VintT operator+(const VintT& a, int b) { VintT c; adds1(c, a, b); return c; } + friend VintT operator-(const VintT& a, int b) { VintT c; subs1(c, a, b); return c; } + friend VintT operator*(const VintT& a, int b) { VintT c; muls1(c, a, b); return c; } + friend VintT operator/(const VintT& a, int b) { VintT c; divs1(c, a, b); return c; } + friend VintT operator%(const VintT& a, int b) { VintT c; mods1(c, a, b); return c; } + friend VintT operator+(const VintT& a, Unit b) { VintT c; addu1(c, a, b); return c; } + friend VintT operator-(const VintT& a, Unit b) { VintT c; 
subu1(c, a, b); return c; } + friend VintT operator*(const VintT& a, Unit b) { VintT c; mulu1(c, a, b); return c; } + friend VintT operator/(const VintT& a, Unit b) { VintT c; divu1(c, a, b); return c; } + friend VintT operator%(const VintT& a, Unit b) { VintT c; modu1(c, a, b); return c; } + + friend VintT operator&(const VintT& a, Unit b) { VintT c; andBitu1(c, a, b); return c; } + friend VintT operator|(const VintT& a, Unit b) { VintT c; orBitu1(c, a, b); return c; } + + VintT operator-() const { VintT c; neg(c, *this); return c; } + VintT& operator<<=(size_t n) { shl(*this, *this, n); return *this; } + VintT& operator>>=(size_t n) { shr(*this, *this, n); return *this; } + VintT operator<<(size_t n) const { VintT c = *this; c <<= n; return c; } + VintT operator>>(size_t n) const { VintT c = *this; c >>= n; return c; } +}; + +#ifdef MCL_VINT_FIXED_BUFFER +typedef VintT<vint::FixedBuffer<mcl::vint::Unit, MCL_MAX_BIT_SIZE * 2> > Vint; +#else +typedef VintT<vint::Buffer<mcl::vint::Unit> > Vint; +#endif + +} // mcl + +//typedef mcl::Vint mpz_class; diff --git a/vendor/github.com/dexon-foundation/mcl/include/mcl/window_method.hpp b/vendor/github.com/dexon-foundation/mcl/include/mcl/window_method.hpp new file mode 100644 index 000000000..cb4fad37e --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/include/mcl/window_method.hpp @@ -0,0 +1,175 @@ +#pragma once +/** + @file + @brief window method + @author MITSUNARI Shigeo(@herumi) +*/ +#include <mcl/array.hpp> +#include <mcl/fp.hpp> + +namespace mcl { namespace fp { + +/* + get w-bit size from x[0, bitSize) + @param x [in] data + @param bitSize [in] data size + @param w [in] split size < UnitBitSize +*/ +template<class T> +struct ArrayIterator { + static const size_t TbitSize = sizeof(T) * 8; + ArrayIterator(const T *x, size_t bitSize, size_t w) + : x(x) + , bitSize(bitSize) + , w(w) + , pos(0) + , mask((w == TbitSize ? 
0 : (T(1) << w)) - 1) + { + assert(w <= TbitSize); + } + bool hasNext() const { return bitSize > 0; } + T getNext() + { + if (w == TbitSize) { + bitSize -= w; + return *x++; + } + if (pos + w < TbitSize) { + T v = (*x >> pos) & mask; + pos += w; + if (bitSize < w) { + bitSize = 0; + } else { + bitSize -= w; + } + return v; + } + if (pos + bitSize <= TbitSize) { + assert(bitSize <= w); + T v = *x >> pos; + assert((v >> bitSize) == 0); + bitSize = 0; + return v & mask; + } + assert(pos > 0); + T v = (x[0] >> pos) | (x[1] << (TbitSize - pos)); + v &= mask; + pos = (pos + w) - TbitSize; + bitSize -= w; + x++; + return v; + } + const T *x; + size_t bitSize; + size_t w; + size_t pos; + T mask; +}; + +template<class Ec> +class WindowMethod { +public: + size_t bitSize_; + size_t winSize_; + mcl::Array<Ec> tbl_; + WindowMethod(const Ec& x, size_t bitSize, size_t winSize) + { + init(x, bitSize, winSize); + } + WindowMethod() + : bitSize_(0) + , winSize_(0) + { + } + /* + @param x [in] base index + @param bitSize [in] exponent bit length + @param winSize [in] window size + */ + void init(bool *pb, const Ec& x, size_t bitSize, size_t winSize) + { + bitSize_ = bitSize; + winSize_ = winSize; + const size_t tblNum = (bitSize + winSize - 1) / winSize; + const size_t r = size_t(1) << winSize; + *pb = tbl_.resize(tblNum * r); + if (!*pb) return; + Ec t(x); + for (size_t i = 0; i < tblNum; i++) { + Ec* w = &tbl_[i * r]; + w[0].clear(); + for (size_t d = 1; d < r; d *= 2) { + for (size_t j = 0; j < d; j++) { + Ec::add(w[j + d], w[j], t); + } + Ec::dbl(t, t); + } + for (size_t j = 0; j < r; j++) { + w[j].normalize(); + } + } + } +#ifndef CYBOZU_DONT_USE_EXCEPTION + void init(const Ec& x, size_t bitSize, size_t winSize) + { + bool b; + init(&b, x, bitSize, winSize); + if (!b) throw cybozu::Exception("mcl:WindowMethod:init") << bitSize << winSize; + } +#endif + /* + @param z [out] x multiplied by y + @param y [in] exponent + */ + template<class tag2, size_t maxBitSize2> + void mul(Ec& z, const FpT<tag2, maxBitSize2>& y) const + { + fp::Block b; + y.getBlock(b); + powArray(z, b.p, b.n, false); + } + void mul(Ec& z, int64_t y) const + { +#if MCL_SIZEOF_UNIT == 8 + Unit u = fp::abs_(y); + powArray(z, &u, 1, y < 0); +#else + uint64_t ua = fp::abs_(y); + Unit u[2] = { uint32_t(ua), uint32_t(ua >> 32) }; + size_t un = u[1] ? 
2 : 1; + powArray(z, u, un, y < 0); +#endif + } + void mul(Ec& z, const mpz_class& y) const + { + powArray(z, gmp::getUnit(y), gmp::getUnitSize(y), y < 0); + } + void powArray(Ec& z, const Unit* y, size_t n, bool isNegative) const + { + z.clear(); + while (n > 0) { + if (y[n - 1]) break; + n--; + } + if (n == 0) return; + assert((n << winSize_) <= tbl_.size()); + if ((n << winSize_) > tbl_.size()) return; + assert(y[n - 1]); + const size_t bitSize = (n - 1) * UnitBitSize + cybozu::bsr<Unit>(y[n - 1]) + 1; + size_t i = 0; + ArrayIterator<Unit> ai(y, bitSize, winSize_); + do { + Unit v = ai.getNext(); + if (v) { + Ec::add(z, z, tbl_[(i << winSize_) + v]); + } + i++; + } while (ai.hasNext()); + if (isNegative) { + Ec::neg(z, z); + } + } +}; + +} } // mcl::fp + diff --git a/vendor/github.com/dexon-foundation/mcl/lib/.emptydir b/vendor/github.com/dexon-foundation/mcl/lib/.emptydir new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/lib/.emptydir diff --git a/vendor/github.com/dexon-foundation/mcl/mcl.sln b/vendor/github.com/dexon-foundation/mcl/mcl.sln new file mode 100644 index 000000000..7c4fe8f0c --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/mcl.sln @@ -0,0 +1,57 @@ +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 2013 +VisualStudioVersion = 12.0.40629.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fp_test", "test\proj\fp_test\fp_test.vcxproj", "{51266DE6-B57B-4AE3-B85C-282F170E1728}" + ProjectSection(ProjectDependencies) = postProject + {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ec_test", "test\proj\ec_test\ec_test.vcxproj", "{46B6E88E-739A-406B-9F68-BC46C5950FA3}" + ProjectSection(ProjectDependencies) = postProject + {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mcl", "src\proj\mcl.vcxproj", "{1DBB979A-C212-45CD-9563-446A96F87F71}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fp_tower_test", "test\proj\fp_tower_test\fp_tower_test.vcxproj", "{733B6250-D249-4A99-B2A6-C8FAF6A90E97}" + ProjectSection(ProjectDependencies) = postProject + {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bn_test", "test\proj\bn_test\bn_test.vcxproj", "{9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}" + ProjectSection(ProjectDependencies) = postProject + {1DBB979A-C212-45CD-9563-446A96F87F71} = {1DBB979A-C212-45CD-9563-446A96F87F71} + EndProjectSection +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.ActiveCfg = Debug|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Debug|x64.Build.0 = Debug|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.ActiveCfg = Release|x64 + {51266DE6-B57B-4AE3-B85C-282F170E1728}.Release|x64.Build.0 = Release|x64 + {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Debug|x64.ActiveCfg = Debug|x64 + {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Debug|x64.Build.0 = Debug|x64 + {46B6E88E-739A-406B-9F68-BC46C5950FA3}.Release|x64.ActiveCfg = Release|x64 + 
{46B6E88E-739A-406B-9F68-BC46C5950FA3}.Release|x64.Build.0 = Release|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.ActiveCfg = Debug|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Debug|x64.Build.0 = Debug|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.ActiveCfg = Release|x64 + {1DBB979A-C212-45CD-9563-446A96F87F71}.Release|x64.Build.0 = Release|x64 + {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Debug|x64.ActiveCfg = Debug|x64 + {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Debug|x64.Build.0 = Debug|x64 + {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Release|x64.ActiveCfg = Release|x64 + {733B6250-D249-4A99-B2A6-C8FAF6A90E97}.Release|x64.Build.0 = Release|x64 + {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Debug|x64.ActiveCfg = Debug|x64 + {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Debug|x64.Build.0 = Debug|x64 + {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Release|x64.ActiveCfg = Release|x64 + {9F935350-2F4C-45FA-A1C2-1D5AA0EADC96}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/vendor/github.com/dexon-foundation/mcl/mk.bat b/vendor/github.com/dexon-foundation/mcl/mk.bat new file mode 100644 index 000000000..19eb84197 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/mk.bat @@ -0,0 +1,20 @@ +@echo off +call setvar.bat +if "%1"=="-s" ( + echo use static lib + set CFLAGS=%CFLAGS% /DMCLBN_DONT_EXPORT +) else if "%1"=="-d" ( + echo use dynamic lib +) else ( + echo "mk (-s|-d) <source file>" + goto exit +) +set SRC=%2 +set EXE=%SRC:.cpp=.exe% +set EXE=%EXE:.c=.exe% +set EXE=%EXE:test\=bin\% +set EXE=%EXE:sample\=bin\% +echo cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS% +cl %CFLAGS% %2 /Fe:%EXE% /link %LDFLAGS% + +:exit diff --git a/vendor/github.com/dexon-foundation/mcl/mklib.bat b/vendor/github.com/dexon-foundation/mcl/mklib.bat new file mode 100644 index 000000000..b601f15d2 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/mklib.bat @@ -0,0 +1,34 @@ +@echo off +call setvar.bat +if "%1"=="dll" ( + echo make dynamic library DLL +) else ( + echo make static library LIB +) +rem nasm -f win64 -D_WIN64 src\asm\low_x86-64.asm +rem lib /OUT:lib\mcl.lib /nodefaultlib fp.obj src\asm\low_x86-64.obj + +echo cl /c %CFLAGS% src\fp.cpp /Foobj\fp.obj + cl /c %CFLAGS% src\fp.cpp /Foobj\fp.obj +echo lib /nologo /OUT:lib\mcl.lib /nodefaultlib obj\fp.obj + lib /nologo /OUT:lib\mcl.lib /nodefaultlib obj\fp.obj + +if "%1"=="dll" ( + echo cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj + cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj /DMCLBN_NO_AUTOLINK + echo link /nologo /DLL /OUT:bin\mclbn256.dll obj\bn_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn256.lib + link /nologo /DLL /OUT:bin\mclbn256.dll obj\bn_c256.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn256.lib + + echo cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj + cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj /DMCLBN_NO_AUTOLINK + echo link /nologo /DLL /OUT:bin\mclbn384.dll obj\bn_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn384.lib + link /nologo /DLL /OUT:bin\mclbn384.dll obj\bn_c384.obj obj\fp.obj %LDFLAGS% /implib:lib\mclbn384.lib +) else ( + echo cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj + cl /c %CFLAGS% src\bn_c256.cpp /Foobj\bn_c256.obj + lib /nologo /OUT:lib\mclbn256.lib /nodefaultlib obj\bn_c256.obj lib\mcl.lib + + echo cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj + cl /c %CFLAGS% src\bn_c384.cpp /Foobj\bn_c384.obj + lib /nologo /OUT:lib\mclbn384.lib /nodefaultlib obj\bn_c384.obj lib\mcl.lib +) diff --git 
a/vendor/github.com/dexon-foundation/mcl/obj/.emptydir b/vendor/github.com/dexon-foundation/mcl/obj/.emptydir new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/obj/.emptydir
diff --git a/vendor/github.com/dexon-foundation/mcl/readme.md b/vendor/github.com/dexon-foundation/mcl/readme.md new file mode 100644 index 000000000..1a9f3acc0 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/readme.md @@ -0,0 +1,365 @@
+[![Build Status](https://travis-ci.org/herumi/mcl.png)](https://travis-ci.org/herumi/mcl)
+
+# mcl
+
+A portable and fast pairing-based cryptography library.
+
+# Abstract
+
+mcl is a library for pairing-based cryptography.
+The current version supports the optimal Ate pairing over BN curves and BLS12-381 curves.
+
+# News
+* The 2nd argument of `mclBn_init` has changed from `maxUnitSize` to `compiledTimeVar`, which must be `MCLBN_COMPILED_TIME_VAR`.
+* Backward compatibility of mapToGi for BLS12 is broken; the map-to function for BN is now used.
+If `MCL_USE_OLD_MAPTO_FOR_BLS12` is defined, the old function is used, but it will be removed in the future.
+
+# Supported architectures
+
+* x86-64 Windows + Visual Studio
+* x86, x86-64 Linux + gcc/clang
+* ARM Linux
+* ARM64 Linux
+* (possibly any platform supported by LLVM)
+* WebAssembly
+
+# Supported curves
+
+p(z) = 36z^4 + 36z^3 + 24z^2 + 6z + 1.
+
+* BN254 ; a BN curve over the 254-bit prime p(z) where z = -(2^62 + 2^55 + 1).
+* BN\_SNARK1 ; a BN curve over a 254-bit prime p such that n := p + 1 - t has high 2-adicity.
+* BN381\_1 ; a BN curve over the 381-bit prime p(z) where z = -(2^94 + 2^76 + 2^72 + 1).
+* BN462 ; a BN curve over the 462-bit prime p(z) where z = 2^114 + 2^101 - 2^14 - 1.
+* BLS12\_381 ; [a BLS12-381 curve](https://blog.z.cash/new-snark-curve/)
+
+# Benchmark
+
+A benchmark of the BN curve BN254 (2016/12/25).
+
+* x64, x86 ; Intel Core i7-6700 3.4GHz (Skylake), up to 4GHz, on Ubuntu 16.04.
+    * `sudo cpufreq-set -g performance`
+* arm ; 900MHz quad-core ARM Cortex-A7 on Raspberry Pi2, Linux 4.4.11-v7+
+* arm64 ; 1.2GHz ARM Cortex-A53 [HiKey](http://www.96boards.org/product/hikey/)
+
+software | x64| x86| arm|arm64 (msec)
+---------------------------------------------------------|------|-----|----|-----
+[ate-pairing](https://github.com/herumi/ate-pairing) | 0.21 | - | - | -
+mcl | 0.31 | 1.6 |22.6| 3.9
+[TEPLA](http://www.cipher.risk.tsukuba.ac.jp/tepla/) | 1.76 | 3.7 | 37 | 17.9
+[RELIC](https://github.com/relic-toolkit/relic) PRIME=254| 0.30 | 3.5 | 36 | -
+[MIRACL](https://github.com/miracl/MIRACL) ake12bnx | 4.2 | - | 78 | -
+[NEONabe](http://sandia.cs.cinvestav.mx/Site/NEONabe) | - | - | 16 | -
+
+* compile options for RELIC
+```
+cmake -DARITH=x64-asm-254 -DFP_PRIME=254 -DFPX_METHD="INTEG;INTEG;LAZYR" -DPP_METHD="LAZYR;OATEP"
+```
+## Higher-bit BN curve benchmark by mcl
+
+For JavaScript (WebAssembly), see the [ID based encryption demo](https://herumi.github.io/mcl-wasm/ibe-demo.html).
+
+parameter | x64| Firefox on x64|Safari on iPhone7|
+-----------|-----|---------------|-----------------|
+BN254 | 0.29| 2.48| 4.78|
+BN381\_1 | 0.95| 7.91| 11.74|
+BN462 | 2.16| 14.73| 22.77|
+
+* x64 : 'Kaby Lake Core i7-7700 (3.6GHz)'.
+* Firefox : 64-bit version 58.
+* iPhone7 : iOS 11.2.1.
+* BN254 is measured by `test/bn_test.cpp`.
+* BN381\_1 and BN462 are measured by `test/bn512_test.cpp`.
+* All timings are given in ms (milliseconds).
+
+Other benchmark results are in [bench.txt](bench.txt).
+
+# Installation Requirements
+
+* [GMP](https://gmplib.org/) and OpenSSL
+```
+apt install libgmp-dev libssl-dev
+```
+
+Create a working directory (e.g., work) and clone the following repositories.
+```
+mkdir work
+cd work
+git clone git://github.com/herumi/mcl
+git clone git://github.com/herumi/cybozulib_ext # Windows only
+```
+* Cybozulib\_ext is a prerequisite for running OpenSSL and GMP on VC (Visual C++).
+
+# Build and test on x86-64 Linux, macOS, ARM and ARM64 Linux
+To make lib/libmcl.a and test it:
+```
+cd work/mcl
+make test
+```
+To benchmark a pairing:
+```
+bin/bn_test.exe
+```
+To make sample programs:
+```
+make sample
+```
+
+If you want to change compiler options for optimization, set `CFLAGS_OPT_USER`.
+```
+make CFLAGS_OPT_USER="-O2"
+```
+
+## Build for 32-bit Linux
+Build OpenSSL and GMP in 32-bit mode and install them into `<lib32>`:
+```
+make ARCH=x86 CFLAGS_USER="-I <lib32>/include" LDFLAGS_USER="-L <lib32>/lib -Wl,-rpath,<lib32>/lib"
+```
+
+## Build for 64-bit Windows
+1) Make the static library and use it:
+
+```
+mklib
+mk -s test\bn_c256_test.cpp
+bin\bn_c256_test.exe
+```
+2) Make the dynamic library and use it:
+
+```
+mklib dll
+mk -d test\bn_c256_test.cpp
+bin\bn_c256_test.exe
+```
+
+Alternatively, open mcl.sln and build, or, if you have msbuild.exe:
+```
+msbuild /p:Configuration=Release
+```
+
+## Build with cmake
+For Linux,
+```
+mkdir build
+cd build
+cmake ..
+make
+```
+For Visual Studio,
+```
+mkdir build
+cd build
+cmake .. -A x64
+msbuild mcl.sln /p:Configuration=Release /m
+```
+## Build for wasm (WebAssembly)
+mcl supports emcc (Emscripten), and `test/bn_test.cpp` runs on browsers such as Firefox, Chrome and Edge.
+
+* [IBE on browser](https://herumi.github.io/mcl-wasm/ibe-demo.html)
+* [SHE on browser](https://herumi.github.io/she-wasm/she-demo.html)
+* [BLS signature on browser](https://herumi.github.io/bls-wasm/bls-demo.html)
+
+The timing of a pairing on `BN254` is 2.8 msec on 64-bit Firefox on a 3.4GHz Skylake.
+
+### Node.js
+
+* [mcl-wasm](https://www.npmjs.com/package/mcl-wasm) pairing library
+* [bls-wasm](https://www.npmjs.com/package/bls-wasm) BLS signature library
+* [she-wasm](https://www.npmjs.com/package/she-wasm) 2 Level Homomorphic Encryption library
+
+### SELinux
+mcl uses the Xbyak JIT engine if it is available on the x64 architecture;
+otherwise mcl falls back to slightly slower functions generated by LLVM.
+The SELinux security policy is enabled by default on CentOS, in which case JIT is disabled.
+```
+% sudo setenforce 1
+% getenforce
+Enforcing
+% bin/bn_test.exe
+JIT 0
+pairing 1.496Mclk
+finalExp 581.081Kclk
+
+% sudo setenforce 0
+% getenforce
+Permissive
+% bin/bn_test.exe
+JIT 1
+pairing 1.394Mclk
+finalExp 546.259Kclk
+```
+
+# Libraries
+
+* G1 and G2 are defined over Fp.
+* The order of G1 and G2 is r.
+* Use `bn256.hpp` if only BN254 is used.
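+
+As a quick check after the build, a small program that uses the C++ API can typically be compiled directly against the static library built above (a minimal sketch, not an official build rule: it assumes GMP, its C++ wrapper gmpxx and OpenSSL's libcrypto are installed in default system paths, and `my_app.cpp` is only a placeholder name):
+```
+g++ -O2 -I include my_app.cpp lib/libmcl.a -lgmpxx -lgmp -lcrypto -lpthread -o my_app
+```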
+
+## C++ library
+
+* libmcl.a ; static C++ library of mcl
+* libmcl\_dy.so ; shared C++ library of mcl
+* the default curveType parameter is BN254
+
+header |supported curveType |sizeof Fr|sizeof Fp|
+--------------|-------------------------|---------|---------|
+bn256.hpp |BN254 | 32 | 32 |
+bls12_381.hpp |BLS12_381, BN254 | 32 | 48 |
+bn384.hpp |BN381_1, BLS12_381, BN254| 48 | 48 |
+
+## C library
+
+* Define `MCLBN_FR_UNIT_SIZE` and `MCLBN_FP_UNIT_SIZE` and include bn.h.
+* `MCLBN_FR_UNIT_SIZE` is set to `MCLBN_FP_UNIT_SIZE` unless it is defined explicitly.
+
+library |MCLBN_FR_UNIT_SIZE|MCLBN_FP_UNIT_SIZE|
+------------------|------------------|------------------|
+libmclbn256.a | 4 | 4 |
+libmclbn384_256.a | 4 | 6 |
+libmclbn384.a | 6 | 6 |
+
+* libmclbn*.a ; static C library
+* libmclbn*\_dy.so ; shared C library
+
+If you want to remove the `_dy` suffix from the shared library file names, run `make SHARE_BASENAME_SUF=`.
+
+# How to initialize the pairing library
+Call `mcl::bn256::initPairing` before calling any other operations.
+```
+#include <mcl/bn256.hpp>
+mcl::bn::CurveParam cp = mcl::BN254; // or mcl::BN_SNARK1
+mcl::bn256::initPairing(cp);
+mcl::bn256::G1 P(...);
+mcl::bn256::G2 Q(...);
+mcl::bn256::Fp12 e;
+mcl::bn256::pairing(e, P, Q);
+```
+1. (BN254) a BN curve over the 254-bit prime p = p(z) where z = -(2^62 + 2^55 + 1).
+2. (BN_SNARK1) a BN curve over a 254-bit prime p such that n := p + 1 - t has high 2-adicity.
+3. BN381_1 with `mcl/bn384.hpp`.
+4. BN462 with `mcl/bn512.hpp`.
+
+See [test/bn_test.cpp](https://github.com/herumi/mcl/blob/master/test/bn_test.cpp).
+
+## Default constructor of Fp, Ec, etc.
+A default constructor does not initialize the instance.
+Set a valid value before referring to it.
+
+## Definition of groups
+
+The curve equation for a BN curve is:
+
+    E/Fp: y^2 = x^3 + b .
+
+* the cyclic group G1 is instantiated as E(Fp)[n] where n := p + 1 - t;
+* the cyclic group G2 is instantiated as the inverse image of E'(Fp^2)[n] under a twisting isomorphism phi from E' to E; and
+* the pairing e: G1 x G2 -> Fp12 is the optimal ate pairing.
+
+The field Fp12 is constructed via the following tower:
+
+* Fp2 = Fp[u] / (u^2 + 1)
+* Fp6 = Fp2[v] / (v^3 - Xi) where Xi = u + 1
+* Fp12 = Fp6[w] / (w^2 - v)
+* GT = { x in Fp12 | x^r = 1 }
+
+
+## Arithmetic operations
+
+G1 and G2 are additive groups and have the following operations:
+
+* T::add(T& z, const T& x, const T& y); // z = x + y
+* T::sub(T& z, const T& x, const T& y); // z = x - y
+* T::neg(T& y, const T& x); // y = -x
+* T::mul(T& z, const T& x, const INT& y); // z = scalar multiplication of x by y
+
+Remark: &z == &x or &y are allowed. INT means an integer type such as Fr, int and mpz_class.
+
+`T::mul` uses the GLV method, so `G2::mul` returns a wrong value if x is not in G2.
+Use `T::mulGeneric(T& z, const T& x, const INT& y)` for x in phi^-1(E'(Fp^2)) - G2.
+
+Fp, Fp2, Fp6 and Fp12 have the following operations:
+
+* T::add(T& z, const T& x, const T& y); // z = x + y
+* T::sub(T& z, const T& x, const T& y); // z = x - y
+* T::mul(T& z, const T& x, const T& y); // z = x * y
+* T::div(T& z, const T& x, const T& y); // z = x / y
+* T::neg(T& y, const T& x); // y = -x
+* T::inv(T& y, const T& x); // y = 1/x
+* T::pow(T& z, const T& x, const INT& y); // z = x^y
+* Fp12::unitaryInv(T& y, const T& x); // y = conjugate of x
+
+Remark: `Fp12::mul` uses the GLV method, so it returns a wrong value if x is not in GT.
+Use `Fp12::mulGeneric` for x in Fp12 - GT.
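+
+For illustration, the group and field operations above can be combined as follows (a minimal sketch, not part of the original documentation: it assumes `initPairing` has already been called as in the initialization section, uses `mapToG1` from the next section to obtain a valid point, and the variable names and constants are arbitrary):
+```
+using namespace mcl::bn256;
+G1 P, R;
+mapToG1(P, Fp(123)); // derive some point of G1 from the value 123
+G1::mul(R, P, 5);    // R = 5 P (scalar multiplication)
+G1::add(R, R, P);    // R = 6 P
+G1::neg(R, R);       // R = -6 P
+Fp x, y;
+Fp::inv(x, Fp(7));   // x = 1/7 in Fp
+Fp::pow(y, x, 3);    // y = x^3
+```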
+
+## Map To points
+
+* mapToG1(G1& P, const Fp& x);
+* mapToG2(G2& P, const Fp2& x);
+
+These functions map x into Gi according to [_Faster hashing to G2_].
+
+## String format of G1 and G2
+G1 and G2 are represented by three elements of Fp (x, y, z) in Jacobian coordinates.
+The normalize() method normalizes them to the affine coordinates (x, y, 1) or (0, 0, 0).
+
+The getStr() method returns one of the following:
+
+* `0` ; infinity
+* `1 <x> <y>` ; uncompressed format
+* `2 <x>` ; compressed format for even y
+* `3 <x>` ; compressed format for odd y
+
+## Verify an element in G2
+`G2::isValid()` checks that the element is on the curve of G2 and that its order is r, to prevent subgroup attacks.
+`G2::set()`, `G2::setStr` and `operator<<` also check the order.
+If you verify it outside the library, you can skip the verification by calling `G2::verifyOrderG2(false)`.
+
+# How to make asm files (optional)
+The asm files generated this way are already included in `src/asm`, so this step is not necessary.
+
+Install [LLVM](http://llvm.org/).
+```
+make MCL_USE_LLVM=1 LLVM_VER=<llvm-version> UPDATE_ASM=1
+```
+For example, specify `-3.8` for `<llvm-version>` if `opt-3.8` and `llc-3.8` are installed.
+
+If you want to use Fp with a 1024-bit prime on x86-64, then
+```
+make MCL_USE_LLVM=1 LLVM_VER=<llvm-version> UPDATE_ASM=1 MCL_MAX_BIT_SIZE=1024
+```
+
+# API for Two-level homomorphic encryption
+* [_Efficient Two-level Homomorphic Encryption in Prime-order Bilinear Groups and A Fast Implementation in WebAssembly_](https://dl.acm.org/citation.cfm?doid=3196494.3196552), N. Attrapadung, G. Hanaoka, S. Mitsunari, Y. Sakai,
+K. Shimizu, and T. Teruya. ASIACCS 2018
+* [she-api](https://github.com/herumi/mcl/blob/master/misc/she/she-api.md)
+* [she-api (Japanese)](https://github.com/herumi/mcl/blob/master/misc/she/she-api-ja.md)
+
+# Java API
+See [java.md](https://github.com/herumi/mcl/blob/master/java/java.md).
+
+# License
+
+modified new BSD License
+http://opensource.org/licenses/BSD-3-Clause
+
+This library contains parts of the following software, licensed under BSD-3-Clause:
+* [xbyak](https://github.com/herumi/xbyak)
+* [cybozulib](https://github.com/herumi/cybozulib)
+* [Lifted-ElGamal](https://github.com/aistcrypt/Lifted-ElGamal)
+
+# References
+* [ate-pairing](https://github.com/herumi/ate-pairing/)
+* [_Faster Explicit Formulas for Computing Pairings over Ordinary Curves_](http://dx.doi.org/10.1007/978-3-642-20465-4_5),
+  D.F. Aranha, K. Karabina, P. Longa, C.H. Gebotys, J. Lopez,
+  EUROCRYPT 2011, ([preprint](http://eprint.iacr.org/2010/526))
+* [_High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves_](http://dx.doi.org/10.1007/978-3-642-17455-1_2),
+  Jean-Luc Beuchat, Jorge Enrique González Díaz, Shigeo Mitsunari, Eiji Okamoto, Francisco Rodríguez-Henríquez, Tadanori Teruya,
+  Pairing 2010, ([preprint](http://eprint.iacr.org/2010/354))
+* [_Faster hashing to G2_](http://dx.doi.org/10.1007/978-3-642-28496-0_25), Laura Fuentes-Castañeda, Edward Knapp, Francisco Rodríguez-Henríquez,
+  SAC 2011, ([preprint](https://eprint.iacr.org/2008/530))
+* [_Skew Frobenius Map and Efficient Scalar Multiplication for Pairing–Based Cryptography_](https://www.researchgate.net/publication/221282560_Skew_Frobenius_Map_and_Efficient_Scalar_Multiplication_for_Pairing-Based_Cryptography),
+  Y. Sakemi, Y. Nogami, K. Okeya, Y. Morikawa, CANS 2008.
+
+# Author
+
+光成滋生 MITSUNARI Shigeo (herumi@nifty.com)
diff --git a/vendor/github.com/dexon-foundation/mcl/release.props b/vendor/github.com/dexon-foundation/mcl/release.props new file mode 100644 index 000000000..886ce6890 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/release.props @@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ImportGroup Label="PropertySheets" />
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup />
+  <ItemDefinitionGroup>
+    <ClCompile>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+    </ClCompile>
+  </ItemDefinitionGroup>
+  <ItemGroup />
+</Project>
\ No newline at end of file diff --git a/vendor/github.com/dexon-foundation/mcl/setvar.bat b/vendor/github.com/dexon-foundation/mcl/setvar.bat new file mode 100644 index 000000000..1d57fa69e --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/setvar.bat @@ -0,0 +1,2 @@ +set CFLAGS=/MT /DNOMINMAX /Ox /DNDEBUG /W4 /Zi /EHsc /nologo -I./include -I../cybozulib_ext/include +set LDFLAGS=/LIBPATH:..\cybozulib_ext\lib /LIBPATH:.\lib diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/aarch64.s b/vendor/github.com/dexon-foundation/mcl/src/asm/aarch64.s new file mode 100644 index 000000000..a49a36e3a --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/src/asm/aarch64.s @@ -0,0 +1,13197 @@ + .text + .file "<stdin>" + .globl makeNIST_P192L + .align 2 + .type makeNIST_P192L,@function +makeNIST_P192L: // @makeNIST_P192L +// BB#0: + movn x0, #0 + orr x1, xzr, #0xfffffffffffffffe + movn x2, #0 + ret +.Lfunc_end0: + .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L + + .globl mcl_fpDbl_mod_NIST_P192L + .align 2 + .type mcl_fpDbl_mod_NIST_P192L,@function +mcl_fpDbl_mod_NIST_P192L: // @mcl_fpDbl_mod_NIST_P192L +// BB#0: + ldp x8, x9, [x1, #16] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x1] + orr w14, wzr, #0x1 + adds x13, x11, x13 + adcs x8, x8, xzr + adcs x15, xzr, xzr + adds x12, x12, x9 + adcs x13, x13, x10 + adcs x8, x8, x11 + adcs x15, x15, xzr + adds x11, x12, x11 + movn x12, #0 + adcs x9, x13, x9 + adcs x8, x8, x10 + adcs x10, x15, xzr + adds x11, x10, x11 + adcs x9, x10, x9 + adcs x8, x8, xzr + adcs x10, xzr, xzr + adds x13, x11, #1 // =1 + adcs x14, x9, x14 + adcs x15, x8, xzr + adcs x10, x10, x12 + tst x10, #0x1 + csel x10, x11, x13, ne + csel x9, x9, x14, ne + csel x8, x8, x15, ne + stp x10, x9, [x0] + str x8, [x0, #16] + ret +.Lfunc_end1: + .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L + + .globl mcl_fp_sqr_NIST_P192L + .align 2 + .type mcl_fp_sqr_NIST_P192L,@function +mcl_fp_sqr_NIST_P192L: // @mcl_fp_sqr_NIST_P192L +// BB#0: + ldp x8, x9, [x1] + ldr x10, [x1, #16] + orr w11, wzr, #0x1 + umulh x12, x8, x8 + mul x13, x9, x8 + mul x14, x10, x8 + umulh x15, x9, x8 + adds x12, x12, x13 + umulh x16, x10, x8 + adcs x17, x15, x14 + adcs x18, x16, xzr + mul x1, x9, x9 + mul x2, x10, x9 + adds x15, x15, x1 + umulh x1, x9, x9 + umulh x9, x10, x9 + adcs x1, x1, x2 + adcs x3, x9, xzr + adds x12, x13, x12 + adcs x13, x15, x17 + adcs x15, x1, x18 + movn x17, #0 + umulh x18, x10, x10 + mul x10, x10, x10 + mul x8, x8, x8 + adcs x1, x3, xzr + adds x16, x16, x2 + adcs x9, x9, x10 + adcs x10, x18, xzr + adds x13, x14, x13 + adcs x14, x16, x15 + adcs x9, x9, x1 + adcs x10, x10, xzr + adds x12, x12, x10 + adcs x13, x13, xzr + adcs x15, xzr, xzr + adds x8, x8, x14 + adcs x12, x12, x9 + adcs x13, x13, x10 + adcs x15, x15, xzr + adds x8, x8, x10 + adcs x10, x12, x14 + adcs x9, x13, x9 + adcs x12, x15, xzr + adds x8, x12, x8 + adcs x10, x12, x10 + adcs x9, x9, xzr + adcs x12, xzr, xzr + adds x13, x8, #1 // =1 + adcs x11, x10, x11 + adcs x14, x9, xzr + adcs x12, x12, x17 + tst x12, #0x1 + csel x8, x8, x13, ne + csel x10, x10, x11, ne + csel x9, x9, x14, ne + stp x8, x10, [x0] + str x9, [x0, #16] + ret +.Lfunc_end2: + .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L + + .globl mcl_fp_mulNIST_P192L + .align 2 + .type mcl_fp_mulNIST_P192L,@function +mcl_fp_mulNIST_P192L: // @mcl_fp_mulNIST_P192L +// BB#0: + stp x20, x19, [sp, #-32]! 
+ stp x29, x30, [sp, #16] + add x29, sp, #16 // =16 + sub sp, sp, #48 // =48 + mov x19, x0 + mov x0, sp + bl mcl_fpDbl_mulPre3L + ldp x9, x8, [sp, #8] + ldp x11, x10, [sp, #32] + ldr x12, [sp, #24] + ldr x13, [sp] + orr w14, wzr, #0x1 + adds x9, x10, x9 + adcs x8, x8, xzr + adcs x15, xzr, xzr + adds x13, x13, x12 + adcs x9, x9, x11 + adcs x8, x8, x10 + adcs x15, x15, xzr + adds x10, x13, x10 + movn x13, #0 + adcs x9, x9, x12 + adcs x8, x8, x11 + adcs x11, x15, xzr + adds x10, x11, x10 + adcs x9, x11, x9 + adcs x8, x8, xzr + adcs x11, xzr, xzr + adds x12, x10, #1 // =1 + adcs x14, x9, x14 + adcs x15, x8, xzr + adcs x11, x11, x13 + tst x11, #0x1 + csel x10, x10, x12, ne + csel x9, x9, x14, ne + csel x8, x8, x15, ne + stp x10, x9, [x19] + str x8, [x19, #16] + sub sp, x29, #16 // =16 + ldp x29, x30, [sp, #16] + ldp x20, x19, [sp], #32 + ret +.Lfunc_end3: + .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L + + .globl mcl_fpDbl_mod_NIST_P521L + .align 2 + .type mcl_fpDbl_mod_NIST_P521L,@function +mcl_fpDbl_mod_NIST_P521L: // @mcl_fpDbl_mod_NIST_P521L +// BB#0: + stp x29, x30, [sp, #-16]! + mov x29, sp + ldp x8, x9, [x1, #112] + ldr x10, [x1, #128] + ldp x11, x12, [x1, #96] + ldp x13, x14, [x1, #80] + ldp x15, x16, [x1, #64] + ldp x17, x18, [x1, #48] + ldp x2, x3, [x1, #32] + ldp x4, x5, [x1, #16] + ldp x6, x1, [x1] + extr x7, x10, x9, #9 + extr x9, x9, x8, #9 + extr x8, x8, x12, #9 + extr x12, x12, x11, #9 + extr x11, x11, x14, #9 + extr x14, x14, x13, #9 + extr x13, x13, x16, #9 + extr x16, x16, x15, #9 + and x15, x15, #0x1ff + lsr x10, x10, #9 + adds x16, x16, x6 + adcs x13, x13, x1 + adcs x14, x14, x4 + adcs x11, x11, x5 + adcs x12, x12, x2 + adcs x1, x8, x3 + adcs x17, x9, x17 + adcs x18, x7, x18 + adcs x2, x10, x15 + ubfx x8, x2, #9, #1 + adds x8, x8, x16 + adcs x9, x13, xzr + and x13, x9, x8 + adcs x10, x14, xzr + and x13, x13, x10 + adcs x11, x11, xzr + and x13, x13, x11 + adcs x12, x12, xzr + and x14, x13, x12 + adcs x13, x1, xzr + and x15, x14, x13 + adcs x14, x17, xzr + and x16, x15, x14 + adcs x15, x18, xzr + and x17, x16, x15 + adcs x16, x2, xzr + orr x18, x16, #0xfffffffffffffe00 + and x17, x17, x18 + cmn x17, #1 // =1 + b.eq .LBB4_2 +// BB#1: // %nonzero + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + stp x14, x15, [x0, #48] + and x8, x16, #0x1ff + str x8, [x0, #64] + ldp x29, x30, [sp], #16 + ret +.LBB4_2: // %zero + mov w1, wzr + movz w2, #0x48 + bl memset + ldp x29, x30, [sp], #16 + ret +.Lfunc_end4: + .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L + + .globl mcl_fp_mulUnitPre1L + .align 2 + .type mcl_fp_mulUnitPre1L,@function +mcl_fp_mulUnitPre1L: // @mcl_fp_mulUnitPre1L +// BB#0: + ldr x8, [x1] + mul x9, x8, x2 + umulh x8, x8, x2 + stp x9, x8, [x0] + ret +.Lfunc_end5: + .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L + + .globl mcl_fpDbl_mulPre1L + .align 2 + .type mcl_fpDbl_mulPre1L,@function +mcl_fpDbl_mulPre1L: // @mcl_fpDbl_mulPre1L +// BB#0: + ldr x8, [x1] + ldr x9, [x2] + mul x10, x9, x8 + umulh x8, x9, x8 + stp x10, x8, [x0] + ret +.Lfunc_end6: + .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L + + .globl mcl_fpDbl_sqrPre1L + .align 2 + .type mcl_fpDbl_sqrPre1L,@function +mcl_fpDbl_sqrPre1L: // @mcl_fpDbl_sqrPre1L +// BB#0: + ldr x8, [x1] + mul x9, x8, x8 + umulh x8, x8, x8 + stp x9, x8, [x0] + ret +.Lfunc_end7: + .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L + + .globl mcl_fp_mont1L + .align 2 + .type mcl_fp_mont1L,@function +mcl_fp_mont1L: // @mcl_fp_mont1L +// BB#0: + ldr x8, [x2] 
+ ldr x9, [x1] + ldur x10, [x3, #-8] + ldr x11, [x3] + umulh x12, x9, x8 + mul x8, x9, x8 + mul x9, x8, x10 + umulh x10, x9, x11 + mul x9, x9, x11 + cmn x9, x8 + adcs x8, x10, x12 + adcs x9, xzr, xzr + subs x10, x8, x11 + sbcs x9, x9, xzr + tst x9, #0x1 + csel x8, x8, x10, ne + str x8, [x0] + ret +.Lfunc_end8: + .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L + + .globl mcl_fp_montNF1L + .align 2 + .type mcl_fp_montNF1L,@function +mcl_fp_montNF1L: // @mcl_fp_montNF1L +// BB#0: + ldr x8, [x2] + ldr x9, [x1] + ldur x10, [x3, #-8] + ldr x11, [x3] + umulh x12, x9, x8 + mul x8, x9, x8 + mul x9, x8, x10 + umulh x10, x9, x11 + mul x9, x9, x11 + cmn x9, x8 + adcs x8, x10, x12 + sub x9, x8, x11 + cmp x9, #0 // =0 + csel x8, x8, x9, lt + str x8, [x0] + ret +.Lfunc_end9: + .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L + + .globl mcl_fp_montRed1L + .align 2 + .type mcl_fp_montRed1L,@function +mcl_fp_montRed1L: // @mcl_fp_montRed1L +// BB#0: + ldur x8, [x2, #-8] + ldp x9, x11, [x1] + ldr x10, [x2] + mul x8, x9, x8 + umulh x12, x8, x10 + mul x8, x8, x10 + cmn x9, x8 + adcs x8, x11, x12 + adcs x9, xzr, xzr + subs x10, x8, x10 + sbcs x9, x9, xzr + tst x9, #0x1 + csel x8, x8, x10, ne + str x8, [x0] + ret +.Lfunc_end10: + .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L + + .globl mcl_fp_addPre1L + .align 2 + .type mcl_fp_addPre1L,@function +mcl_fp_addPre1L: // @mcl_fp_addPre1L +// BB#0: + ldr x8, [x1] + ldr x9, [x2] + adds x9, x9, x8 + adcs x8, xzr, xzr + str x9, [x0] + mov x0, x8 + ret +.Lfunc_end11: + .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L + + .globl mcl_fp_subPre1L + .align 2 + .type mcl_fp_subPre1L,@function +mcl_fp_subPre1L: // @mcl_fp_subPre1L +// BB#0: + ldr x8, [x2] + ldr x9, [x1] + subs x9, x9, x8 + ngcs x8, xzr + and x8, x8, #0x1 + str x9, [x0] + mov x0, x8 + ret +.Lfunc_end12: + .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L + + .globl mcl_fp_shr1_1L + .align 2 + .type mcl_fp_shr1_1L,@function +mcl_fp_shr1_1L: // @mcl_fp_shr1_1L +// BB#0: + ldr x8, [x1] + lsr x8, x8, #1 + str x8, [x0] + ret +.Lfunc_end13: + .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L + + .globl mcl_fp_add1L + .align 2 + .type mcl_fp_add1L,@function +mcl_fp_add1L: // @mcl_fp_add1L +// BB#0: + ldr x8, [x1] + ldr x9, [x2] + ldr x10, [x3] + adds x8, x9, x8 + str x8, [x0] + adcs x9, xzr, xzr + subs x8, x8, x10 + sbcs x9, x9, xzr + and w9, w9, #0x1 + tbnz w9, #0, .LBB14_2 +// BB#1: // %nocarry + str x8, [x0] +.LBB14_2: // %carry + ret +.Lfunc_end14: + .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L + + .globl mcl_fp_addNF1L + .align 2 + .type mcl_fp_addNF1L,@function +mcl_fp_addNF1L: // @mcl_fp_addNF1L +// BB#0: + ldr x8, [x1] + ldr x9, [x2] + ldr x10, [x3] + add x8, x9, x8 + sub x9, x8, x10 + cmp x9, #0 // =0 + csel x8, x8, x9, lt + str x8, [x0] + ret +.Lfunc_end15: + .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L + + .globl mcl_fp_sub1L + .align 2 + .type mcl_fp_sub1L,@function +mcl_fp_sub1L: // @mcl_fp_sub1L +// BB#0: + ldr x8, [x2] + ldr x9, [x1] + subs x8, x9, x8 + str x8, [x0] + ngcs x9, xzr + and w9, w9, #0x1 + tbnz w9, #0, .LBB16_2 +// BB#1: // %nocarry + ret +.LBB16_2: // %carry + ldr x9, [x3] + add x8, x9, x8 + str x8, [x0] + ret +.Lfunc_end16: + .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L + + .globl mcl_fp_subNF1L + .align 2 + .type mcl_fp_subNF1L,@function +mcl_fp_subNF1L: // @mcl_fp_subNF1L +// BB#0: + ldr x8, [x2] + ldr x9, [x1] + ldr x10, [x3] + sub x8, x9, x8 + and x9, x10, x8, asr #63 + add x8, x9, x8 + str x8, [x0] + ret +.Lfunc_end17: + .size mcl_fp_subNF1L, 
.Lfunc_end17-mcl_fp_subNF1L + + .globl mcl_fpDbl_add1L + .align 2 + .type mcl_fpDbl_add1L,@function +mcl_fpDbl_add1L: // @mcl_fpDbl_add1L +// BB#0: + ldp x8, x11, [x1] + ldp x9, x10, [x2] + ldr x12, [x3] + adds x8, x9, x8 + str x8, [x0] + adcs x8, x10, x11 + adcs x9, xzr, xzr + subs x10, x8, x12 + sbcs x9, x9, xzr + tst x9, #0x1 + csel x8, x8, x10, ne + str x8, [x0, #8] + ret +.Lfunc_end18: + .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L + + .globl mcl_fpDbl_sub1L + .align 2 + .type mcl_fpDbl_sub1L,@function +mcl_fpDbl_sub1L: // @mcl_fpDbl_sub1L +// BB#0: + ldp x8, x11, [x1] + ldp x9, x10, [x2] + ldr x12, [x3] + subs x8, x8, x9 + str x8, [x0] + sbcs x8, x11, x10 + ngcs x9, xzr + tst x9, #0x1 + csel x9, x12, xzr, ne + add x8, x9, x8 + str x8, [x0, #8] + ret +.Lfunc_end19: + .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L + + .globl mcl_fp_mulUnitPre2L + .align 2 + .type mcl_fp_mulUnitPre2L,@function +mcl_fp_mulUnitPre2L: // @mcl_fp_mulUnitPre2L +// BB#0: + ldp x8, x9, [x1] + mul x10, x8, x2 + mul x11, x9, x2 + umulh x8, x8, x2 + umulh x9, x9, x2 + adds x8, x8, x11 + stp x10, x8, [x0] + adcs x8, x9, xzr + str x8, [x0, #16] + ret +.Lfunc_end20: + .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L + + .globl mcl_fpDbl_mulPre2L + .align 2 + .type mcl_fpDbl_mulPre2L,@function +mcl_fpDbl_mulPre2L: // @mcl_fpDbl_mulPre2L +// BB#0: + ldp x8, x11, [x2] + ldp x9, x10, [x1] + mul x12, x9, x8 + umulh x13, x10, x8 + mul x14, x10, x8 + umulh x8, x9, x8 + mul x15, x9, x11 + mul x16, x10, x11 + umulh x9, x9, x11 + umulh x10, x10, x11 + adds x8, x8, x14 + adcs x11, x13, xzr + adds x8, x8, x15 + stp x12, x8, [x0] + adcs x8, x11, x16 + adcs x11, xzr, xzr + adds x8, x8, x9 + str x8, [x0, #16] + adcs x8, x11, x10 + str x8, [x0, #24] + ret +.Lfunc_end21: + .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L + + .globl mcl_fpDbl_sqrPre2L + .align 2 + .type mcl_fpDbl_sqrPre2L,@function +mcl_fpDbl_sqrPre2L: // @mcl_fpDbl_sqrPre2L +// BB#0: + ldp x8, x9, [x1] + mul x10, x8, x8 + umulh x11, x9, x8 + mul x12, x9, x8 + umulh x8, x8, x8 + umulh x13, x9, x9 + mul x9, x9, x9 + str x10, [x0] + adds x8, x8, x12 + adcs x10, x11, xzr + adds x9, x11, x9 + adcs x11, x13, xzr + adds x8, x12, x8 + str x8, [x0, #8] + adcs x8, x9, x10 + str x8, [x0, #16] + adcs x8, x11, xzr + str x8, [x0, #24] + ret +.Lfunc_end22: + .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L + + .globl mcl_fp_mont2L + .align 2 + .type mcl_fp_mont2L,@function +mcl_fp_mont2L: // @mcl_fp_mont2L +// BB#0: + ldp x8, x14, [x2] + ldp x9, x10, [x1] + ldur x11, [x3, #-8] + ldp x12, x13, [x3] + umulh x15, x10, x8 + mul x16, x10, x8 + umulh x17, x9, x8 + mul x8, x9, x8 + umulh x18, x14, x10 + mul x10, x14, x10 + umulh x1, x14, x9 + mul x9, x14, x9 + adds x14, x17, x16 + mul x16, x8, x11 + adcs x15, x15, xzr + mul x17, x16, x13 + umulh x2, x16, x12 + adds x17, x2, x17 + umulh x2, x16, x13 + mul x16, x16, x12 + adcs x2, x2, xzr + cmn x16, x8 + adcs x8, x17, x14 + adcs x14, x2, x15 + adcs x15, xzr, xzr + adds x10, x1, x10 + adcs x16, x18, xzr + adds x8, x8, x9 + adcs x9, x14, x10 + mul x10, x8, x11 + adcs x11, x15, x16 + umulh x14, x10, x13 + mul x15, x10, x13 + umulh x16, x10, x12 + mul x10, x10, x12 + adcs x17, xzr, xzr + adds x15, x16, x15 + adcs x14, x14, xzr + cmn x10, x8 + adcs x8, x15, x9 + adcs x9, x14, x11 + adcs x10, x17, xzr + subs x11, x8, x12 + sbcs x12, x9, x13 + sbcs x10, x10, xzr + tst x10, #0x1 + csel x8, x8, x11, ne + csel x9, x9, x12, ne + stp x8, x9, [x0] + ret +.Lfunc_end23: + .size mcl_fp_mont2L, 
.Lfunc_end23-mcl_fp_mont2L + + .globl mcl_fp_montNF2L + .align 2 + .type mcl_fp_montNF2L,@function +mcl_fp_montNF2L: // @mcl_fp_montNF2L +// BB#0: + ldp x8, x14, [x2] + ldp x9, x10, [x1] + ldur x11, [x3, #-8] + ldp x12, x13, [x3] + umulh x15, x10, x8 + mul x16, x10, x8 + umulh x17, x9, x8 + mul x8, x9, x8 + umulh x18, x14, x10 + mul x10, x14, x10 + umulh x1, x14, x9 + mul x9, x14, x9 + adds x14, x17, x16 + mul x16, x8, x11 + adcs x15, x15, xzr + mul x17, x16, x12 + cmn x17, x8 + mul x8, x16, x13 + umulh x17, x16, x13 + umulh x16, x16, x12 + adcs x8, x8, x14 + adcs x14, x15, xzr + adds x8, x8, x16 + adcs x14, x14, x17 + adds x10, x1, x10 + adcs x15, x18, xzr + adds x8, x9, x8 + adcs x9, x10, x14 + mul x10, x8, x11 + adcs x11, x15, xzr + mul x14, x10, x13 + mul x15, x10, x12 + umulh x16, x10, x13 + umulh x10, x10, x12 + cmn x15, x8 + adcs x8, x14, x9 + adcs x9, x11, xzr + adds x8, x8, x10 + adcs x9, x9, x16 + subs x10, x8, x12 + sbcs x11, x9, x13 + cmp x11, #0 // =0 + csel x8, x8, x10, lt + csel x9, x9, x11, lt + stp x8, x9, [x0] + ret +.Lfunc_end24: + .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L + + .globl mcl_fp_montRed2L + .align 2 + .type mcl_fp_montRed2L,@function +mcl_fp_montRed2L: // @mcl_fp_montRed2L +// BB#0: + ldur x8, [x2, #-8] + ldp x9, x14, [x1] + ldp x10, x11, [x2] + ldp x12, x13, [x1, #16] + mul x15, x9, x8 + mul x16, x15, x11 + umulh x17, x15, x10 + adds x16, x17, x16 + umulh x17, x15, x11 + mul x15, x15, x10 + adcs x17, x17, xzr + cmn x9, x15 + adcs x9, x14, x16 + adcs x12, x12, x17 + mul x8, x9, x8 + adcs x13, x13, xzr + umulh x14, x8, x11 + mul x15, x8, x11 + umulh x16, x8, x10 + mul x8, x8, x10 + adcs x17, xzr, xzr + adds x15, x16, x15 + adcs x14, x14, xzr + cmn x8, x9 + adcs x8, x15, x12 + adcs x9, x14, x13 + adcs x12, x17, xzr + subs x10, x8, x10 + sbcs x11, x9, x11 + sbcs x12, x12, xzr + tst x12, #0x1 + csel x8, x8, x10, ne + csel x9, x9, x11, ne + stp x8, x9, [x0] + ret +.Lfunc_end25: + .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L + + .globl mcl_fp_addPre2L + .align 2 + .type mcl_fp_addPre2L,@function +mcl_fp_addPre2L: // @mcl_fp_addPre2L +// BB#0: + ldp x8, x11, [x1] + ldp x9, x10, [x2] + adds x8, x9, x8 + str x8, [x0] + adcs x9, x10, x11 + adcs x8, xzr, xzr + str x9, [x0, #8] + mov x0, x8 + ret +.Lfunc_end26: + .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L + + .globl mcl_fp_subPre2L + .align 2 + .type mcl_fp_subPre2L,@function +mcl_fp_subPre2L: // @mcl_fp_subPre2L +// BB#0: + ldp x8, x11, [x1] + ldp x9, x10, [x2] + subs x8, x8, x9 + str x8, [x0] + sbcs x9, x11, x10 + ngcs x8, xzr + and x8, x8, #0x1 + str x9, [x0, #8] + mov x0, x8 + ret +.Lfunc_end27: + .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L + + .globl mcl_fp_shr1_2L + .align 2 + .type mcl_fp_shr1_2L,@function +mcl_fp_shr1_2L: // @mcl_fp_shr1_2L +// BB#0: + ldp x8, x9, [x1] + extr x8, x9, x8, #1 + lsr x9, x9, #1 + stp x8, x9, [x0] + ret +.Lfunc_end28: + .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L + + .globl mcl_fp_add2L + .align 2 + .type mcl_fp_add2L,@function +mcl_fp_add2L: // @mcl_fp_add2L +// BB#0: + ldp x8, x11, [x1] + ldp x9, x10, [x2] + adds x8, x9, x8 + ldp x9, x12, [x3] + adcs x10, x10, x11 + stp x8, x10, [x0] + adcs x11, xzr, xzr + subs x9, x8, x9 + sbcs x8, x10, x12 + sbcs x10, x11, xzr + and w10, w10, #0x1 + tbnz w10, #0, .LBB29_2 +// BB#1: // %nocarry + stp x9, x8, [x0] +.LBB29_2: // %carry + ret +.Lfunc_end29: + .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L + + .globl mcl_fp_addNF2L + .align 2 + .type mcl_fp_addNF2L,@function +mcl_fp_addNF2L: // @mcl_fp_addNF2L 
+// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x2] + ldp x12, x13, [x3] + adds x8, x10, x8 + adcs x9, x11, x9 + subs x10, x8, x12 + sbcs x11, x9, x13 + cmp x11, #0 // =0 + csel x8, x8, x10, lt + csel x9, x9, x11, lt + stp x8, x9, [x0] + ret +.Lfunc_end30: + .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L + + .globl mcl_fp_sub2L + .align 2 + .type mcl_fp_sub2L,@function +mcl_fp_sub2L: // @mcl_fp_sub2L +// BB#0: + ldp x8, x11, [x1] + ldp x9, x10, [x2] + subs x9, x8, x9 + sbcs x8, x11, x10 + stp x9, x8, [x0] + ngcs x10, xzr + and w10, w10, #0x1 + tbnz w10, #0, .LBB31_2 +// BB#1: // %nocarry + ret +.LBB31_2: // %carry + ldp x10, x11, [x3] + adds x9, x10, x9 + adcs x8, x11, x8 + stp x9, x8, [x0] + ret +.Lfunc_end31: + .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L + + .globl mcl_fp_subNF2L + .align 2 + .type mcl_fp_subNF2L,@function +mcl_fp_subNF2L: // @mcl_fp_subNF2L +// BB#0: + ldp x8, x11, [x1] + ldp x9, x10, [x2] + subs x8, x8, x9 + ldp x9, x12, [x3] + sbcs x10, x11, x10 + asr x11, x10, #63 + and x9, x11, x9 + and x11, x11, x12 + adds x8, x9, x8 + str x8, [x0] + adcs x8, x11, x10 + str x8, [x0, #8] + ret +.Lfunc_end32: + .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L + + .globl mcl_fpDbl_add2L + .align 2 + .type mcl_fpDbl_add2L,@function +mcl_fpDbl_add2L: // @mcl_fpDbl_add2L +// BB#0: + ldp x8, x9, [x2, #16] + ldp x10, x15, [x1] + ldp x11, x14, [x2] + ldp x12, x13, [x1, #16] + adds x10, x11, x10 + ldp x11, x16, [x3] + str x10, [x0] + adcs x10, x14, x15 + str x10, [x0, #8] + adcs x8, x8, x12 + adcs x9, x9, x13 + adcs x10, xzr, xzr + subs x11, x8, x11 + sbcs x12, x9, x16 + sbcs x10, x10, xzr + tst x10, #0x1 + csel x8, x8, x11, ne + csel x9, x9, x12, ne + stp x8, x9, [x0, #16] + ret +.Lfunc_end33: + .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L + + .globl mcl_fpDbl_sub2L + .align 2 + .type mcl_fpDbl_sub2L,@function +mcl_fpDbl_sub2L: // @mcl_fpDbl_sub2L +// BB#0: + ldp x8, x9, [x2, #16] + ldp x10, x14, [x2] + ldp x11, x15, [x1] + ldp x12, x13, [x1, #16] + subs x10, x11, x10 + ldp x11, x16, [x3] + str x10, [x0] + sbcs x10, x15, x14 + str x10, [x0, #8] + sbcs x8, x12, x8 + sbcs x9, x13, x9 + ngcs x10, xzr + tst x10, #0x1 + csel x10, x16, xzr, ne + csel x11, x11, xzr, ne + adds x8, x11, x8 + str x8, [x0, #16] + adcs x8, x10, x9 + str x8, [x0, #24] + ret +.Lfunc_end34: + .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L + + .globl mcl_fp_mulUnitPre3L + .align 2 + .type mcl_fp_mulUnitPre3L,@function +mcl_fp_mulUnitPre3L: // @mcl_fp_mulUnitPre3L +// BB#0: + ldp x8, x9, [x1] + ldr x10, [x1, #16] + mul x11, x8, x2 + mul x12, x9, x2 + umulh x8, x8, x2 + mul x13, x10, x2 + umulh x9, x9, x2 + umulh x10, x10, x2 + adds x8, x8, x12 + stp x11, x8, [x0] + adcs x8, x9, x13 + str x8, [x0, #16] + adcs x8, x10, xzr + str x8, [x0, #24] + ret +.Lfunc_end35: + .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L + + .globl mcl_fpDbl_mulPre3L + .align 2 + .type mcl_fpDbl_mulPre3L,@function +mcl_fpDbl_mulPre3L: // @mcl_fpDbl_mulPre3L +// BB#0: + stp x20, x19, [sp, #-16]! 
+ ldp x8, x9, [x1] + ldp x10, x12, [x2] + ldr x11, [x1, #16] + ldr x13, [x2, #16] + mul x14, x8, x10 + umulh x15, x11, x10 + mul x16, x11, x10 + umulh x17, x9, x10 + mul x18, x9, x10 + umulh x10, x8, x10 + mul x1, x8, x12 + mul x2, x11, x12 + mul x3, x9, x12 + umulh x4, x11, x12 + umulh x5, x9, x12 + umulh x12, x8, x12 + mul x6, x8, x13 + mul x7, x11, x13 + mul x19, x9, x13 + umulh x8, x8, x13 + umulh x9, x9, x13 + umulh x11, x11, x13 + str x14, [x0] + adds x10, x10, x18 + adcs x13, x17, x16 + adcs x14, x15, xzr + adds x10, x10, x1 + str x10, [x0, #8] + adcs x10, x13, x3 + adcs x13, x14, x2 + adcs x14, xzr, xzr + adds x10, x10, x12 + adcs x12, x13, x5 + adcs x13, x14, x4 + adds x10, x10, x6 + str x10, [x0, #16] + adcs x10, x12, x19 + adcs x12, x13, x7 + adcs x13, xzr, xzr + adds x8, x10, x8 + str x8, [x0, #24] + adcs x8, x12, x9 + str x8, [x0, #32] + adcs x8, x13, x11 + str x8, [x0, #40] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end36: + .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L + + .globl mcl_fpDbl_sqrPre3L + .align 2 + .type mcl_fpDbl_sqrPre3L,@function +mcl_fpDbl_sqrPre3L: // @mcl_fpDbl_sqrPre3L +// BB#0: + ldp x8, x10, [x1] + ldr x9, [x1, #16] + mul x11, x8, x8 + umulh x12, x9, x8 + mul x13, x9, x8 + umulh x14, x10, x8 + mul x15, x10, x8 + umulh x8, x8, x8 + mul x16, x9, x10 + str x11, [x0] + adds x8, x8, x15 + adcs x11, x14, x13 + adcs x17, x12, xzr + adds x8, x8, x15 + mul x15, x10, x10 + str x8, [x0, #8] + umulh x8, x9, x10 + umulh x10, x10, x10 + adcs x11, x11, x15 + adcs x15, x17, x16 + adcs x17, xzr, xzr + adds x11, x11, x14 + umulh x14, x9, x9 + mul x9, x9, x9 + adcs x10, x15, x10 + adcs x15, x17, x8 + adds x12, x12, x16 + adcs x8, x8, x9 + adcs x9, x14, xzr + adds x11, x13, x11 + adcs x10, x12, x10 + stp x11, x10, [x0, #16] + adcs x8, x8, x15 + str x8, [x0, #32] + adcs x8, x9, xzr + str x8, [x0, #40] + ret +.Lfunc_end37: + .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L + + .globl mcl_fp_mont3L + .align 2 + .type mcl_fp_mont3L,@function +mcl_fp_mont3L: // @mcl_fp_mont3L +// BB#0: + stp x24, x23, [sp, #-48]! 
+ stp x22, x21, [sp, #16] + stp x20, x19, [sp, #32] + ldp x15, x16, [x2] + ldp x13, x14, [x1, #8] + ldr x12, [x1] + ldur x11, [x3, #-8] + ldp x9, x8, [x3, #8] + ldr x10, [x3] + ldr x17, [x2, #16] + umulh x18, x14, x15 + mul x1, x14, x15 + umulh x2, x13, x15 + mul x3, x13, x15 + umulh x4, x12, x15 + mul x15, x12, x15 + umulh x5, x16, x14 + mul x6, x16, x14 + umulh x7, x16, x13 + mul x19, x16, x13 + umulh x20, x16, x12 + mul x16, x16, x12 + umulh x21, x17, x14 + mul x14, x17, x14 + adds x3, x4, x3 + mul x4, x15, x11 + adcs x1, x2, x1 + mul x2, x4, x8 + mul x22, x4, x9 + umulh x23, x4, x10 + adcs x18, x18, xzr + adds x22, x23, x22 + umulh x23, x4, x9 + adcs x2, x23, x2 + umulh x23, x4, x8 + mul x4, x4, x10 + adcs x23, x23, xzr + cmn x4, x15 + umulh x15, x17, x13 + mul x13, x17, x13 + umulh x4, x17, x12 + mul x12, x17, x12 + adcs x17, x22, x3 + adcs x1, x2, x1 + adcs x18, x23, x18 + adcs x2, xzr, xzr + adds x3, x20, x19 + adcs x6, x7, x6 + adcs x5, x5, xzr + adds x16, x17, x16 + adcs x17, x1, x3 + mul x1, x16, x11 + adcs x18, x18, x6 + mul x3, x1, x8 + mul x6, x1, x9 + umulh x7, x1, x10 + adcs x2, x2, x5 + adcs x5, xzr, xzr + adds x6, x7, x6 + umulh x7, x1, x9 + adcs x3, x7, x3 + umulh x7, x1, x8 + mul x1, x1, x10 + adcs x7, x7, xzr + cmn x1, x16 + adcs x16, x6, x17 + adcs x17, x3, x18 + adcs x18, x7, x2 + adcs x1, x5, xzr + adds x13, x4, x13 + adcs x14, x15, x14 + adcs x15, x21, xzr + adds x12, x16, x12 + adcs x13, x17, x13 + mul x11, x12, x11 + adcs x14, x18, x14 + umulh x16, x11, x8 + mul x17, x11, x8 + umulh x18, x11, x9 + mul x2, x11, x9 + umulh x3, x11, x10 + mul x11, x11, x10 + adcs x15, x1, x15 + adcs x1, xzr, xzr + adds x2, x3, x2 + adcs x17, x18, x17 + adcs x16, x16, xzr + cmn x11, x12 + adcs x11, x2, x13 + adcs x12, x17, x14 + adcs x13, x16, x15 + adcs x14, x1, xzr + subs x10, x11, x10 + sbcs x9, x12, x9 + sbcs x8, x13, x8 + sbcs x14, x14, xzr + tst x14, #0x1 + csel x10, x11, x10, ne + csel x9, x12, x9, ne + csel x8, x13, x8, ne + stp x10, x9, [x0] + str x8, [x0, #16] + ldp x20, x19, [sp, #32] + ldp x22, x21, [sp, #16] + ldp x24, x23, [sp], #48 + ret +.Lfunc_end38: + .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L + + .globl mcl_fp_montNF3L + .align 2 + .type mcl_fp_montNF3L,@function +mcl_fp_montNF3L: // @mcl_fp_montNF3L +// BB#0: + stp x22, x21, [sp, #-32]! 
+ stp x20, x19, [sp, #16] + ldp x14, x16, [x2] + ldp x15, x13, [x1, #8] + ldr x12, [x1] + ldur x11, [x3, #-8] + ldp x9, x8, [x3, #8] + ldr x10, [x3] + ldr x17, [x2, #16] + umulh x18, x13, x14 + mul x1, x13, x14 + umulh x2, x15, x14 + mul x3, x15, x14 + umulh x4, x12, x14 + mul x14, x12, x14 + umulh x5, x16, x13 + mul x6, x16, x13 + umulh x7, x16, x15 + mul x19, x16, x15 + umulh x20, x16, x12 + mul x16, x16, x12 + umulh x21, x17, x13 + mul x13, x17, x13 + adds x3, x4, x3 + mul x4, x14, x11 + adcs x1, x2, x1 + mul x2, x4, x10 + adcs x18, x18, xzr + cmn x2, x14 + umulh x14, x17, x15 + mul x15, x17, x15 + umulh x2, x17, x12 + mul x12, x17, x12 + mul x17, x4, x9 + adcs x17, x17, x3 + mul x3, x4, x8 + adcs x1, x3, x1 + umulh x3, x4, x10 + adcs x18, x18, xzr + adds x17, x17, x3 + umulh x3, x4, x9 + adcs x1, x1, x3 + umulh x3, x4, x8 + adcs x18, x18, x3 + adds x3, x20, x19 + adcs x4, x7, x6 + adcs x5, x5, xzr + adds x16, x16, x17 + adcs x17, x3, x1 + mul x1, x16, x11 + adcs x18, x4, x18 + mul x3, x1, x8 + mul x4, x1, x10 + adcs x5, x5, xzr + cmn x4, x16 + mul x16, x1, x9 + umulh x4, x1, x8 + adcs x16, x16, x17 + umulh x17, x1, x9 + umulh x1, x1, x10 + adcs x18, x3, x18 + adcs x3, x5, xzr + adds x16, x16, x1 + adcs x17, x18, x17 + adcs x18, x3, x4 + adds x15, x2, x15 + adcs x13, x14, x13 + adcs x14, x21, xzr + adds x12, x12, x16 + adcs x15, x15, x17 + mul x11, x12, x11 + adcs x13, x13, x18 + mul x16, x11, x8 + mul x17, x11, x9 + mul x18, x11, x10 + umulh x1, x11, x8 + umulh x2, x11, x9 + umulh x11, x11, x10 + adcs x14, x14, xzr + cmn x18, x12 + adcs x12, x17, x15 + adcs x13, x16, x13 + adcs x14, x14, xzr + adds x11, x12, x11 + adcs x12, x13, x2 + adcs x13, x14, x1 + subs x10, x11, x10 + sbcs x9, x12, x9 + sbcs x8, x13, x8 + asr x14, x8, #63 + cmp x14, #0 // =0 + csel x10, x11, x10, lt + csel x9, x12, x9, lt + csel x8, x13, x8, lt + stp x10, x9, [x0] + str x8, [x0, #16] + ldp x20, x19, [sp, #16] + ldp x22, x21, [sp], #32 + ret +.Lfunc_end39: + .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L + + .globl mcl_fp_montRed3L + .align 2 + .type mcl_fp_montRed3L,@function +mcl_fp_montRed3L: // @mcl_fp_montRed3L +// BB#0: + ldur x8, [x2, #-8] + ldp x9, x17, [x1] + ldp x12, x10, [x2, #8] + ldr x11, [x2] + ldp x13, x14, [x1, #32] + ldp x15, x16, [x1, #16] + mul x18, x9, x8 + umulh x1, x18, x10 + mul x2, x18, x10 + umulh x3, x18, x12 + mul x4, x18, x12 + umulh x5, x18, x11 + mul x18, x18, x11 + adds x4, x5, x4 + adcs x2, x3, x2 + adcs x1, x1, xzr + cmn x9, x18 + adcs x9, x17, x4 + adcs x15, x15, x2 + mul x17, x9, x8 + adcs x16, x16, x1 + umulh x18, x17, x10 + mul x1, x17, x10 + umulh x2, x17, x12 + mul x3, x17, x12 + umulh x4, x17, x11 + mul x17, x17, x11 + adcs x13, x13, xzr + adcs x14, x14, xzr + adcs x5, xzr, xzr + adds x3, x4, x3 + adcs x1, x2, x1 + adcs x18, x18, xzr + cmn x17, x9 + adcs x9, x3, x15 + adcs x15, x1, x16 + mul x8, x9, x8 + adcs x13, x18, x13 + umulh x16, x8, x10 + mul x17, x8, x10 + umulh x18, x8, x12 + mul x1, x8, x12 + umulh x2, x8, x11 + mul x8, x8, x11 + adcs x14, x14, xzr + adcs x3, x5, xzr + adds x1, x2, x1 + adcs x17, x18, x17 + adcs x16, x16, xzr + cmn x8, x9 + adcs x8, x1, x15 + adcs x9, x17, x13 + adcs x13, x16, x14 + adcs x14, x3, xzr + subs x11, x8, x11 + sbcs x12, x9, x12 + sbcs x10, x13, x10 + sbcs x14, x14, xzr + tst x14, #0x1 + csel x8, x8, x11, ne + csel x9, x9, x12, ne + csel x10, x13, x10, ne + stp x8, x9, [x0] + str x10, [x0, #16] + ret +.Lfunc_end40: + .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L + + .globl mcl_fp_addPre3L + .align 2 + .type 
mcl_fp_addPre3L,@function +mcl_fp_addPre3L: // @mcl_fp_addPre3L +// BB#0: + ldp x11, x8, [x2, #8] + ldp x9, x12, [x1] + ldr x10, [x2] + ldr x13, [x1, #16] + adds x9, x10, x9 + str x9, [x0] + adcs x9, x11, x12 + str x9, [x0, #8] + adcs x9, x8, x13 + adcs x8, xzr, xzr + str x9, [x0, #16] + mov x0, x8 + ret +.Lfunc_end41: + .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L + + .globl mcl_fp_subPre3L + .align 2 + .type mcl_fp_subPre3L,@function +mcl_fp_subPre3L: // @mcl_fp_subPre3L +// BB#0: + ldp x11, x8, [x2, #8] + ldp x9, x12, [x1] + ldr x10, [x2] + ldr x13, [x1, #16] + subs x9, x9, x10 + str x9, [x0] + sbcs x9, x12, x11 + str x9, [x0, #8] + sbcs x9, x13, x8 + ngcs x8, xzr + and x8, x8, #0x1 + str x9, [x0, #16] + mov x0, x8 + ret +.Lfunc_end42: + .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L + + .globl mcl_fp_shr1_3L + .align 2 + .type mcl_fp_shr1_3L,@function +mcl_fp_shr1_3L: // @mcl_fp_shr1_3L +// BB#0: + ldp x8, x9, [x1] + ldr x10, [x1, #16] + extr x8, x9, x8, #1 + extr x9, x10, x9, #1 + lsr x10, x10, #1 + stp x8, x9, [x0] + str x10, [x0, #16] + ret +.Lfunc_end43: + .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L + + .globl mcl_fp_add3L + .align 2 + .type mcl_fp_add3L,@function +mcl_fp_add3L: // @mcl_fp_add3L +// BB#0: + ldp x11, x8, [x2, #8] + ldp x9, x12, [x1] + ldr x10, [x2] + ldr x13, [x1, #16] + adds x9, x10, x9 + adcs x11, x11, x12 + ldr x10, [x3] + ldp x12, x14, [x3, #8] + stp x9, x11, [x0] + adcs x8, x8, x13 + str x8, [x0, #16] + adcs x13, xzr, xzr + subs x10, x9, x10 + sbcs x9, x11, x12 + sbcs x8, x8, x14 + sbcs x11, x13, xzr + and w11, w11, #0x1 + tbnz w11, #0, .LBB44_2 +// BB#1: // %nocarry + stp x10, x9, [x0] + str x8, [x0, #16] +.LBB44_2: // %carry + ret +.Lfunc_end44: + .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L + + .globl mcl_fp_addNF3L + .align 2 + .type mcl_fp_addNF3L,@function +mcl_fp_addNF3L: // @mcl_fp_addNF3L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x2] + ldr x12, [x1, #16] + ldr x13, [x2, #16] + adds x8, x10, x8 + adcs x9, x11, x9 + ldp x10, x11, [x3] + ldr x14, [x3, #16] + adcs x12, x13, x12 + subs x10, x8, x10 + sbcs x11, x9, x11 + sbcs x13, x12, x14 + asr x14, x13, #63 + cmp x14, #0 // =0 + csel x8, x8, x10, lt + csel x9, x9, x11, lt + csel x10, x12, x13, lt + stp x8, x9, [x0] + str x10, [x0, #16] + ret +.Lfunc_end45: + .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L + + .globl mcl_fp_sub3L + .align 2 + .type mcl_fp_sub3L,@function +mcl_fp_sub3L: // @mcl_fp_sub3L +// BB#0: + ldp x11, x10, [x2, #8] + ldp x8, x12, [x1] + ldr x9, [x2] + ldr x13, [x1, #16] + subs x8, x8, x9 + sbcs x9, x12, x11 + stp x8, x9, [x0] + sbcs x10, x13, x10 + str x10, [x0, #16] + ngcs x11, xzr + and w11, w11, #0x1 + tbnz w11, #0, .LBB46_2 +// BB#1: // %nocarry + ret +.LBB46_2: // %carry + ldp x13, x11, [x3, #8] + ldr x12, [x3] + adds x8, x12, x8 + adcs x9, x13, x9 + adcs x10, x11, x10 + stp x8, x9, [x0] + str x10, [x0, #16] + ret +.Lfunc_end46: + .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L + + .globl mcl_fp_subNF3L + .align 2 + .type mcl_fp_subNF3L,@function +mcl_fp_subNF3L: // @mcl_fp_subNF3L +// BB#0: + ldp x8, x9, [x2] + ldp x10, x11, [x1] + ldr x12, [x2, #16] + ldr x13, [x1, #16] + subs x8, x10, x8 + sbcs x9, x11, x9 + ldp x10, x11, [x3] + ldr x14, [x3, #16] + sbcs x12, x13, x12 + asr x13, x12, #63 + and x11, x13, x11 + and x14, x13, x14 + extr x13, x13, x12, #63 + and x10, x13, x10 + adds x8, x10, x8 + str x8, [x0] + adcs x8, x11, x9 + str x8, [x0, #8] + adcs x8, x14, x12 + str x8, [x0, #16] + ret +.Lfunc_end47: + .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L + 
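+// ---------------------------------------------------------------------------
+// Reading aid for the mcl_fp_montRed{2,3,...}L routines above: they are
+// fixed-size unrollings of word-level Montgomery reduction. Each round
+// multiplies the current low limb by the per-field constant loaded with
+// `ldur ..., [x2, #-8]` (the standard Montgomery value -p[0]^-1 mod 2^64),
+// accumulates m*p through a mul/umulh + adds/adcs carry chain (the low limb
+// cancels, hence the `cmn`), and finishes with a conditional subtraction of
+// the modulus via subs/sbcs and csel. The loops are fully unrolled per limb
+// count, which is why the same pattern repeats with only register names
+// changing. A minimal C sketch of the same algorithm follows; it assumes a
+// compiler with the unsigned __int128 extension, and the helper name
+// mont_red is illustrative only, not part of mcl's API.
+//
+//   #include <stdint.h>
+//   #include <stddef.h>
+//
+//   typedef unsigned __int128 u128;
+//
+//   /* z = T * R^-1 mod p, with R = 2^(64*n); T has 2n limbs, z and p have n.
+//      inv = -p[0]^-1 mod 2^64 (the value at [modulus - 8] in the assembly). */
+//   static void mont_red(uint64_t *z, uint64_t *T, const uint64_t *p,
+//                        uint64_t inv, size_t n) {
+//       uint64_t top = 0;                        /* carry above T[2n-1]        */
+//       for (size_t i = 0; i < n; i++) {
+//           uint64_t m = T[i] * inv;             /* mul  m, T[i], inv          */
+//           u128 carry = 0;
+//           for (size_t j = 0; j < n; j++) {     /* mul/umulh + adds/adcs      */
+//               u128 t = (u128)m * p[j] + T[i + j] + carry;
+//               T[i + j] = (uint64_t)t;          /* low limb of T[i] cancels   */
+//               carry = t >> 64;
+//           }
+//           for (size_t k = i + n; carry; k++) { /* ripple the carry upward    */
+//               if (k < 2 * n) {
+//                   u128 t = (u128)T[k] + carry;
+//                   T[k] = (uint64_t)t;
+//                   carry = t >> 64;
+//               } else {
+//                   top += (uint64_t)carry;      /* the `adcs xN, xzr, xzr`    */
+//                   carry = 0;
+//               }
+//           }
+//       }
+//       /* conditional subtraction of p: the subs/sbcs ... csel tail */
+//       uint64_t borrow = 0;
+//       for (size_t j = 0; j < n; j++) {
+//           u128 d = (u128)T[n + j] - p[j] - borrow;
+//           z[j] = (uint64_t)d;
+//           borrow = (d >> 64) ? 1 : 0;
+//       }
+//       if (borrow > top)                        /* already < p: keep T[n..]   */
+//           for (size_t j = 0; j < n; j++) z[j] = T[n + j];
+//   }
+//
+// The montNF variants differ only in the final step: they select on the sign
+// of the subtracted result (asr/cmp ... lt) instead of on a carry bit.
+// ---------------------------------------------------------------------------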
+ .globl mcl_fpDbl_add3L + .align 2 + .type mcl_fpDbl_add3L,@function +mcl_fpDbl_add3L: // @mcl_fpDbl_add3L +// BB#0: + ldp x8, x9, [x2, #32] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x2, #16] + ldp x15, x18, [x2] + ldp x16, x17, [x1, #16] + ldp x14, x1, [x1] + adds x14, x15, x14 + ldr x15, [x3, #16] + str x14, [x0] + ldp x14, x2, [x3] + adcs x18, x18, x1 + adcs x12, x12, x16 + stp x18, x12, [x0, #8] + adcs x12, x13, x17 + adcs x8, x8, x10 + adcs x9, x9, x11 + adcs x10, xzr, xzr + subs x11, x12, x14 + sbcs x13, x8, x2 + sbcs x14, x9, x15 + sbcs x10, x10, xzr + tst x10, #0x1 + csel x10, x12, x11, ne + csel x8, x8, x13, ne + csel x9, x9, x14, ne + stp x10, x8, [x0, #24] + str x9, [x0, #40] + ret +.Lfunc_end48: + .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L + + .globl mcl_fpDbl_sub3L + .align 2 + .type mcl_fpDbl_sub3L,@function +mcl_fpDbl_sub3L: // @mcl_fpDbl_sub3L +// BB#0: + ldp x8, x9, [x2, #32] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x2, #16] + ldp x14, x18, [x2] + ldp x16, x17, [x1, #16] + ldp x15, x1, [x1] + subs x14, x15, x14 + ldr x15, [x3, #16] + str x14, [x0] + ldp x14, x2, [x3] + sbcs x18, x1, x18 + sbcs x12, x16, x12 + stp x18, x12, [x0, #8] + sbcs x12, x17, x13 + sbcs x8, x10, x8 + sbcs x9, x11, x9 + ngcs x10, xzr + tst x10, #0x1 + csel x10, x15, xzr, ne + csel x11, x2, xzr, ne + csel x13, x14, xzr, ne + adds x12, x13, x12 + adcs x8, x11, x8 + stp x12, x8, [x0, #24] + adcs x8, x10, x9 + str x8, [x0, #40] + ret +.Lfunc_end49: + .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L + + .globl mcl_fp_mulUnitPre4L + .align 2 + .type mcl_fp_mulUnitPre4L,@function +mcl_fp_mulUnitPre4L: // @mcl_fp_mulUnitPre4L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x1, #16] + mul x12, x8, x2 + mul x13, x9, x2 + umulh x8, x8, x2 + mul x14, x10, x2 + umulh x9, x9, x2 + mul x15, x11, x2 + umulh x10, x10, x2 + umulh x11, x11, x2 + adds x8, x8, x13 + stp x12, x8, [x0] + adcs x8, x9, x14 + str x8, [x0, #16] + adcs x8, x10, x15 + str x8, [x0, #24] + adcs x8, x11, xzr + str x8, [x0, #32] + ret +.Lfunc_end50: + .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L + + .globl mcl_fpDbl_mulPre4L + .align 2 + .type mcl_fpDbl_mulPre4L,@function +mcl_fpDbl_mulPre4L: // @mcl_fpDbl_mulPre4L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #32 // =32 + ldp x8, x10, [x1] + ldp x9, x11, [x1] + ldp x12, x14, [x1, #16] + ldp x13, x1, [x1, #16] + ldp x15, x16, [x2] + ldp x17, x18, [x2, #16] + mul x2, x8, x15 + umulh x3, x14, x15 + mul x4, x14, x15 + umulh x5, x12, x15 + mul x6, x12, x15 + umulh x7, x10, x15 + mul x19, x10, x15 + umulh x15, x8, x15 + mul x20, x8, x16 + mul x21, x14, x16 + mul x22, x12, x16 + mul x23, x10, x16 + umulh x24, x14, x16 + umulh x25, x12, x16 + umulh x26, x10, x16 + umulh x16, x8, x16 + mul x27, x8, x17 + mul x28, x14, x17 + mul x29, x12, x17 + mul x30, x10, x17 + umulh x14, x14, x17 + stp x3, x14, [sp, #16] + umulh x12, x12, x17 + str x12, [sp, #8] // 8-byte Folded Spill + umulh x3, x10, x17 + umulh x14, x8, x17 + mul x17, x9, x18 + umulh x12, x9, x18 + mul x10, x11, x18 + umulh x11, x11, x18 + mul x9, x13, x18 + umulh x13, x13, x18 + mul x8, x1, x18 + umulh x18, x1, x18 + str x2, [x0] + adds x15, x15, x19 + adcs x1, x7, x6 + adcs x2, x5, x4 + ldr x4, [sp, #16] // 8-byte Folded Reload + adcs x4, x4, xzr + adds x15, x20, x15 + str x15, [x0, #8] + adcs x15, x23, x1 + adcs x1, x22, x2 + adcs x2, x21, x4 + adcs x4, xzr, xzr + adds x15, x15, x16 + adcs x16, x1, x26 + adcs x1, x2, x25 + adcs x2, x4, x24 + adds x15, x15, x27 + str x15, [x0, #16] + adcs x15, x16, x30 + adcs x16, x1, x29 + adcs x1, x2, x28 + adcs x2, xzr, xzr + adds x14, x15, x14 + adcs x15, x16, x3 + ldr x16, [sp, #8] // 8-byte Folded Reload + adcs x16, x1, x16 + ldr x1, [sp, #24] // 8-byte Folded Reload + adcs x1, x2, x1 + adds x14, x14, x17 + str x14, [x0, #24] + adcs x10, x15, x10 + adcs x9, x16, x9 + adcs x8, x1, x8 + adcs x14, xzr, xzr + adds x10, x10, x12 + adcs x9, x9, x11 + stp x10, x9, [x0, #32] + adcs x8, x8, x13 + str x8, [x0, #48] + adcs x8, x14, x18 + str x8, [x0, #56] + add sp, sp, #32 // =32 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end51: + .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L + + .globl mcl_fpDbl_sqrPre4L + .align 2 + .type mcl_fpDbl_sqrPre4L,@function +mcl_fpDbl_sqrPre4L: // @mcl_fpDbl_sqrPre4L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x13, [x1] + ldp x11, x12, [x1, #16] + ldr x14, [x1, #16] + mul x15, x10, x10 + umulh x16, x12, x10 + mul x17, x12, x10 + umulh x18, x14, x10 + mul x2, x14, x10 + umulh x3, x9, x10 + mul x4, x9, x10 + umulh x10, x10, x10 + str x15, [x0] + adds x10, x10, x4 + adcs x15, x3, x2 + adcs x17, x18, x17 + adcs x16, x16, xzr + adds x10, x10, x4 + mul x4, x12, x9 + str x10, [x0, #8] + mul x10, x9, x9 + adcs x10, x15, x10 + mul x15, x14, x9 + adcs x17, x17, x15 + adcs x16, x16, x4 + adcs x4, xzr, xzr + adds x10, x10, x3 + umulh x3, x9, x9 + adcs x17, x17, x3 + umulh x3, x12, x9 + umulh x9, x14, x9 + adcs x16, x16, x9 + adcs x3, x4, x3 + ldr x1, [x1, #24] + adds x10, x10, x2 + mul x2, x12, x14 + str x10, [x0, #16] + mul x10, x14, x14 + umulh x12, x12, x14 + umulh x14, x14, x14 + adcs x15, x17, x15 + mul x17, x8, x1 + adcs x10, x16, x10 + mul x16, x11, x1 + adcs x2, x3, x2 + adcs x3, xzr, xzr + adds x15, x15, x18 + mul x18, x13, x1 + adcs x9, x10, x9 + mul x10, x1, x1 + umulh x8, x8, x1 + umulh x13, x13, x1 + umulh x11, x11, x1 + umulh x1, x1, x1 + adcs x14, x2, x14 + adcs x12, x3, x12 + adds x15, x15, x17 + adcs x9, x9, x18 + adcs x14, x14, x16 + adcs x10, x12, x10 + adcs x12, xzr, xzr + adds x8, x9, x8 + stp x15, x8, [x0, 
#24] + adcs x8, x14, x13 + str x8, [x0, #40] + adcs x8, x10, x11 + str x8, [x0, #48] + adcs x8, x12, x1 + str x8, [x0, #56] + ret +.Lfunc_end52: + .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L + + .globl mcl_fp_mont4L + .align 2 + .type mcl_fp_mont4L,@function +mcl_fp_mont4L: // @mcl_fp_mont4L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #16 // =16 + str x0, [sp, #8] // 8-byte Folded Spill + ldp x13, x16, [x1, #16] + ldp x14, x15, [x1] + ldur x0, [x3, #-8] + ldp x9, x8, [x3, #16] + ldp x11, x10, [x3] + ldp x17, x18, [x2] + ldp x1, x2, [x2, #16] + umulh x3, x16, x17 + mul x4, x16, x17 + umulh x5, x13, x17 + mul x6, x13, x17 + umulh x7, x15, x17 + mul x19, x15, x17 + umulh x20, x14, x17 + mul x17, x14, x17 + umulh x21, x18, x16 + mul x22, x18, x16 + umulh x23, x18, x13 + mul x24, x18, x13 + umulh x25, x18, x15 + mul x26, x18, x15 + umulh x27, x18, x14 + mul x18, x18, x14 + umulh x28, x1, x16 + adds x19, x20, x19 + mul x20, x17, x0 + adcs x6, x7, x6 + mul x7, x20, x8 + mul x29, x20, x9 + mul x30, x20, x10 + adcs x4, x5, x4 + umulh x5, x20, x11 + adcs x3, x3, xzr + adds x5, x5, x30 + umulh x30, x20, x10 + adcs x29, x30, x29 + umulh x30, x20, x9 + adcs x7, x30, x7 + umulh x30, x20, x8 + mul x20, x20, x11 + adcs x30, x30, xzr + cmn x20, x17 + mul x17, x1, x16 + umulh x20, x1, x13 + adcs x5, x5, x19 + mul x19, x1, x13 + adcs x6, x29, x6 + umulh x29, x1, x15 + adcs x4, x7, x4 + mul x7, x1, x15 + adcs x3, x30, x3 + adcs x30, xzr, xzr + adds x26, x27, x26 + umulh x27, x1, x14 + mul x1, x1, x14 + adcs x24, x25, x24 + umulh x25, x2, x16 + mul x16, x2, x16 + adcs x22, x23, x22 + adcs x21, x21, xzr + adds x18, x5, x18 + adcs x5, x6, x26 + mul x6, x18, x0 + adcs x4, x4, x24 + mul x23, x6, x8 + mul x24, x6, x9 + mul x26, x6, x10 + adcs x3, x3, x22 + umulh x22, x6, x11 + adcs x21, x30, x21 + adcs x30, xzr, xzr + adds x22, x22, x26 + umulh x26, x6, x10 + adcs x24, x26, x24 + umulh x26, x6, x9 + adcs x23, x26, x23 + umulh x26, x6, x8 + mul x6, x6, x11 + adcs x26, x26, xzr + cmn x6, x18 + umulh x18, x2, x13 + mul x13, x2, x13 + umulh x6, x2, x15 + mul x15, x2, x15 + umulh x12, x2, x14 + mul x14, x2, x14 + adcs x2, x22, x5 + adcs x4, x24, x4 + adcs x3, x23, x3 + adcs x5, x26, x21 + adcs x21, x30, xzr + adds x7, x27, x7 + adcs x19, x29, x19 + adcs x17, x20, x17 + adcs x20, x28, xzr + adds x1, x2, x1 + adcs x2, x4, x7 + mul x4, x1, x0 + adcs x3, x3, x19 + mul x7, x4, x8 + mul x19, x4, x9 + mul x22, x4, x10 + adcs x17, x5, x17 + umulh x5, x4, x11 + adcs x20, x21, x20 + adcs x21, xzr, xzr + adds x5, x5, x22 + umulh x22, x4, x10 + adcs x19, x22, x19 + umulh x22, x4, x9 + adcs x7, x22, x7 + umulh x22, x4, x8 + mul x4, x4, x11 + adcs x22, x22, xzr + cmn x4, x1 + adcs x1, x5, x2 + adcs x2, x19, x3 + adcs x17, x7, x17 + adcs x3, x22, x20 + adcs x4, x21, xzr + adds x12, x12, x15 + adcs x13, x6, x13 + adcs x15, x18, x16 + adcs x16, x25, xzr + adds x14, x1, x14 + adcs x12, x2, x12 + mul x18, x14, x0 + adcs x13, x17, x13 + umulh x17, x18, x8 + mul x0, x18, x8 + umulh x1, x18, x9 + mul x2, x18, x9 + umulh x5, x18, x10 + mul x6, x18, x10 + umulh x7, x18, x11 + mul x18, x18, x11 + adcs x15, x3, x15 + adcs x16, x4, x16 + adcs x3, xzr, xzr + adds x4, x7, x6 + adcs x2, x5, x2 + adcs x0, x1, x0 + adcs x17, x17, xzr + cmn x18, x14 + adcs x12, x4, x12 + adcs x13, x2, x13 + adcs x14, x0, x15 + adcs x15, x17, x16 + adcs x16, x3, xzr + subs x11, x12, x11 + sbcs x10, x13, x10 + sbcs x9, 
x14, x9 + sbcs x8, x15, x8 + sbcs x16, x16, xzr + tst x16, #0x1 + csel x11, x12, x11, ne + csel x10, x13, x10, ne + csel x9, x14, x9, ne + csel x8, x15, x8, ne + ldr x12, [sp, #8] // 8-byte Folded Reload + stp x11, x10, [x12] + stp x9, x8, [x12, #16] + add sp, sp, #16 // =16 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end53: + .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L + + .globl mcl_fp_montNF4L + .align 2 + .type mcl_fp_montNF4L,@function +mcl_fp_montNF4L: // @mcl_fp_montNF4L +// BB#0: + stp x28, x27, [sp, #-80]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + ldp x14, x15, [x1, #16] + ldp x13, x16, [x1] + ldur x12, [x3, #-8] + ldp x9, x8, [x3, #16] + ldp x11, x10, [x3] + ldp x17, x18, [x2] + ldp x1, x2, [x2, #16] + umulh x3, x15, x17 + mul x4, x15, x17 + umulh x5, x14, x17 + mul x6, x14, x17 + umulh x7, x16, x17 + mul x19, x16, x17 + umulh x20, x13, x17 + mul x17, x13, x17 + umulh x21, x18, x15 + mul x22, x18, x15 + umulh x23, x18, x14 + mul x24, x18, x14 + umulh x25, x18, x16 + mul x26, x18, x16 + umulh x27, x18, x13 + mul x18, x18, x13 + adds x19, x20, x19 + umulh x20, x1, x15 + adcs x6, x7, x6 + mul x7, x17, x12 + adcs x4, x5, x4 + mul x5, x7, x11 + adcs x3, x3, xzr + cmn x5, x17 + mul x17, x1, x15 + mul x5, x7, x10 + adcs x5, x5, x19 + mul x19, x7, x9 + adcs x6, x19, x6 + mul x19, x7, x8 + adcs x4, x19, x4 + umulh x19, x7, x11 + adcs x3, x3, xzr + adds x5, x5, x19 + umulh x19, x7, x10 + adcs x6, x6, x19 + umulh x19, x7, x9 + adcs x4, x4, x19 + umulh x19, x1, x14 + umulh x7, x7, x8 + adcs x3, x3, x7 + mul x7, x1, x14 + adds x26, x27, x26 + umulh x27, x1, x16 + adcs x24, x25, x24 + mul x25, x1, x16 + adcs x22, x23, x22 + umulh x23, x1, x13 + mul x1, x1, x13 + adcs x21, x21, xzr + adds x18, x18, x5 + umulh x5, x2, x15 + mul x15, x2, x15 + adcs x6, x26, x6 + umulh x26, x2, x14 + mul x14, x2, x14 + adcs x4, x24, x4 + mul x24, x18, x12 + adcs x3, x22, x3 + mul x22, x24, x11 + adcs x21, x21, xzr + cmn x22, x18 + umulh x18, x2, x16 + mul x16, x2, x16 + umulh x22, x2, x13 + mul x13, x2, x13 + mul x2, x24, x10 + adcs x2, x2, x6 + mul x6, x24, x9 + adcs x4, x6, x4 + mul x6, x24, x8 + adcs x3, x6, x3 + umulh x6, x24, x11 + adcs x21, x21, xzr + adds x2, x2, x6 + umulh x6, x24, x10 + adcs x4, x4, x6 + umulh x6, x24, x9 + adcs x3, x3, x6 + umulh x6, x24, x8 + adcs x6, x21, x6 + adds x21, x23, x25 + adcs x7, x27, x7 + adcs x17, x19, x17 + adcs x19, x20, xzr + adds x1, x1, x2 + adcs x2, x21, x4 + mul x4, x1, x12 + adcs x3, x7, x3 + mul x7, x4, x8 + mul x20, x4, x9 + adcs x17, x17, x6 + mul x6, x4, x11 + adcs x19, x19, xzr + cmn x6, x1 + mul x1, x4, x10 + umulh x6, x4, x8 + adcs x1, x1, x2 + umulh x2, x4, x9 + adcs x3, x20, x3 + umulh x20, x4, x10 + umulh x4, x4, x11 + adcs x17, x7, x17 + adcs x7, x19, xzr + adds x1, x1, x4 + adcs x3, x3, x20 + adcs x17, x17, x2 + adcs x2, x7, x6 + adds x16, x22, x16 + adcs x14, x18, x14 + adcs x15, x26, x15 + adcs x18, x5, xzr + adds x13, x13, x1 + adcs x16, x16, x3 + mul x12, x13, x12 + adcs x14, x14, x17 + mul x17, x12, x8 + mul x1, x12, x9 + mul x3, x12, x10 + mul x4, x12, x11 + umulh x5, x12, x8 + umulh x6, x12, x9 + umulh x7, x12, x10 + umulh x12, x12, x11 + adcs x15, x15, x2 + adcs x18, x18, xzr + cmn x4, x13 + adcs x13, x3, x16 + adcs x14, x1, x14 + adcs x15, x17, x15 + adcs x16, x18, xzr + adds x12, x13, x12 + adcs x13, x14, x7 + adcs x14, x15, x6 + adcs x15, x16, x5 + subs 
x11, x12, x11 + sbcs x10, x13, x10 + sbcs x9, x14, x9 + sbcs x8, x15, x8 + cmp x8, #0 // =0 + csel x11, x12, x11, lt + csel x10, x13, x10, lt + csel x9, x14, x9, lt + csel x8, x15, x8, lt + stp x11, x10, [x0] + stp x9, x8, [x0, #16] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #80 + ret +.Lfunc_end54: + .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L + + .globl mcl_fp_montRed4L + .align 2 + .type mcl_fp_montRed4L,@function +mcl_fp_montRed4L: // @mcl_fp_montRed4L +// BB#0: + stp x22, x21, [sp, #-32]! + stp x20, x19, [sp, #16] + ldur x12, [x2, #-8] + ldp x9, x8, [x2, #16] + ldp x11, x10, [x2] + ldp x14, x15, [x1, #48] + ldp x16, x17, [x1, #32] + ldp x18, x2, [x1, #16] + ldp x13, x1, [x1] + mul x3, x13, x12 + umulh x4, x3, x8 + mul x5, x3, x8 + umulh x6, x3, x9 + mul x7, x3, x9 + umulh x19, x3, x10 + mul x20, x3, x10 + umulh x21, x3, x11 + mul x3, x3, x11 + adds x20, x21, x20 + adcs x7, x19, x7 + adcs x5, x6, x5 + adcs x4, x4, xzr + cmn x13, x3 + adcs x13, x1, x20 + adcs x18, x18, x7 + mul x1, x13, x12 + adcs x2, x2, x5 + umulh x3, x1, x8 + mul x5, x1, x8 + umulh x6, x1, x9 + mul x7, x1, x9 + umulh x19, x1, x10 + mul x20, x1, x10 + umulh x21, x1, x11 + mul x1, x1, x11 + adcs x16, x16, x4 + adcs x17, x17, xzr + adcs x14, x14, xzr + adcs x15, x15, xzr + adcs x4, xzr, xzr + adds x20, x21, x20 + adcs x7, x19, x7 + adcs x5, x6, x5 + adcs x3, x3, xzr + cmn x1, x13 + adcs x13, x20, x18 + adcs x18, x7, x2 + mul x1, x13, x12 + adcs x16, x5, x16 + umulh x2, x1, x8 + mul x5, x1, x8 + umulh x6, x1, x9 + mul x7, x1, x9 + umulh x19, x1, x10 + mul x20, x1, x10 + umulh x21, x1, x11 + mul x1, x1, x11 + adcs x17, x3, x17 + adcs x14, x14, xzr + adcs x15, x15, xzr + adcs x3, x4, xzr + adds x4, x21, x20 + adcs x7, x19, x7 + adcs x5, x6, x5 + adcs x2, x2, xzr + cmn x1, x13 + adcs x13, x4, x18 + adcs x16, x7, x16 + mul x12, x13, x12 + adcs x17, x5, x17 + umulh x18, x12, x8 + mul x1, x12, x8 + umulh x4, x12, x9 + mul x5, x12, x9 + umulh x6, x12, x10 + mul x7, x12, x10 + umulh x19, x12, x11 + mul x12, x12, x11 + adcs x14, x2, x14 + adcs x15, x15, xzr + adcs x2, x3, xzr + adds x3, x19, x7 + adcs x5, x6, x5 + adcs x1, x4, x1 + adcs x18, x18, xzr + cmn x12, x13 + adcs x12, x3, x16 + adcs x13, x5, x17 + adcs x14, x1, x14 + adcs x15, x18, x15 + adcs x16, x2, xzr + subs x11, x12, x11 + sbcs x10, x13, x10 + sbcs x9, x14, x9 + sbcs x8, x15, x8 + sbcs x16, x16, xzr + tst x16, #0x1 + csel x11, x12, x11, ne + csel x10, x13, x10, ne + csel x9, x14, x9, ne + csel x8, x15, x8, ne + stp x11, x10, [x0] + stp x9, x8, [x0, #16] + ldp x20, x19, [sp, #16] + ldp x22, x21, [sp], #32 + ret +.Lfunc_end55: + .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L + + .globl mcl_fp_addPre4L + .align 2 + .type mcl_fp_addPre4L,@function +mcl_fp_addPre4L: // @mcl_fp_addPre4L +// BB#0: + ldp x8, x9, [x2, #16] + ldp x10, x11, [x2] + ldp x12, x13, [x1] + ldp x14, x15, [x1, #16] + adds x10, x10, x12 + str x10, [x0] + adcs x10, x11, x13 + adcs x8, x8, x14 + stp x10, x8, [x0, #8] + adcs x9, x9, x15 + adcs x8, xzr, xzr + str x9, [x0, #24] + mov x0, x8 + ret +.Lfunc_end56: + .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L + + .globl mcl_fp_subPre4L + .align 2 + .type mcl_fp_subPre4L,@function +mcl_fp_subPre4L: // @mcl_fp_subPre4L +// BB#0: + ldp x8, x9, [x2, #16] + ldp x10, x11, [x2] + ldp x12, x13, [x1] + ldp x14, x15, [x1, #16] + subs x10, x12, x10 + str x10, [x0] + sbcs x10, x13, x11 + sbcs x8, x14, x8 + stp x10, x8, [x0, #8] + sbcs x9, x15, x9 + ngcs x8, xzr + and 
x8, x8, #0x1 + str x9, [x0, #24] + mov x0, x8 + ret +.Lfunc_end57: + .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L + + .globl mcl_fp_shr1_4L + .align 2 + .type mcl_fp_shr1_4L,@function +mcl_fp_shr1_4L: // @mcl_fp_shr1_4L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x1, #16] + extr x8, x9, x8, #1 + extr x9, x10, x9, #1 + extr x10, x11, x10, #1 + lsr x11, x11, #1 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + ret +.Lfunc_end58: + .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L + + .globl mcl_fp_add4L + .align 2 + .type mcl_fp_add4L,@function +mcl_fp_add4L: // @mcl_fp_add4L +// BB#0: + ldp x8, x9, [x2, #16] + ldp x10, x11, [x2] + ldp x12, x13, [x1] + ldp x14, x15, [x1, #16] + adds x10, x10, x12 + adcs x12, x11, x13 + ldp x11, x13, [x3] + stp x10, x12, [x0] + adcs x8, x8, x14 + adcs x14, x9, x15 + stp x8, x14, [x0, #16] + adcs x15, xzr, xzr + ldp x9, x16, [x3, #16] + subs x11, x10, x11 + sbcs x10, x12, x13 + sbcs x9, x8, x9 + sbcs x8, x14, x16 + sbcs x12, x15, xzr + and w12, w12, #0x1 + tbnz w12, #0, .LBB59_2 +// BB#1: // %nocarry + stp x11, x10, [x0] + stp x9, x8, [x0, #16] +.LBB59_2: // %carry + ret +.Lfunc_end59: + .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L + + .globl mcl_fp_addNF4L + .align 2 + .type mcl_fp_addNF4L,@function +mcl_fp_addNF4L: // @mcl_fp_addNF4L +// BB#0: + ldp x8, x9, [x1, #16] + ldp x10, x11, [x1] + ldp x12, x13, [x2] + ldp x14, x15, [x2, #16] + adds x10, x12, x10 + adcs x11, x13, x11 + ldp x12, x13, [x3] + adcs x8, x14, x8 + ldp x14, x16, [x3, #16] + adcs x9, x15, x9 + subs x12, x10, x12 + sbcs x13, x11, x13 + sbcs x14, x8, x14 + sbcs x15, x9, x16 + cmp x15, #0 // =0 + csel x10, x10, x12, lt + csel x11, x11, x13, lt + csel x8, x8, x14, lt + csel x9, x9, x15, lt + stp x10, x11, [x0] + stp x8, x9, [x0, #16] + ret +.Lfunc_end60: + .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L + + .globl mcl_fp_sub4L + .align 2 + .type mcl_fp_sub4L,@function +mcl_fp_sub4L: // @mcl_fp_sub4L +// BB#0: + ldp x10, x11, [x2, #16] + ldp x8, x9, [x2] + ldp x12, x13, [x1] + ldp x14, x15, [x1, #16] + subs x8, x12, x8 + sbcs x9, x13, x9 + stp x8, x9, [x0] + sbcs x10, x14, x10 + sbcs x11, x15, x11 + stp x10, x11, [x0, #16] + ngcs x12, xzr + and w12, w12, #0x1 + tbnz w12, #0, .LBB61_2 +// BB#1: // %nocarry + ret +.LBB61_2: // %carry + ldp x12, x13, [x3, #16] + ldp x14, x15, [x3] + adds x8, x14, x8 + adcs x9, x15, x9 + adcs x10, x12, x10 + adcs x11, x13, x11 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + ret +.Lfunc_end61: + .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L + + .globl mcl_fp_subNF4L + .align 2 + .type mcl_fp_subNF4L,@function +mcl_fp_subNF4L: // @mcl_fp_subNF4L +// BB#0: + ldp x8, x9, [x2, #16] + ldp x10, x11, [x2] + ldp x12, x13, [x1] + ldp x14, x15, [x1, #16] + subs x10, x12, x10 + sbcs x11, x13, x11 + ldp x12, x13, [x3, #16] + sbcs x8, x14, x8 + ldp x14, x16, [x3] + sbcs x9, x15, x9 + asr x15, x9, #63 + and x14, x15, x14 + and x16, x15, x16 + and x12, x15, x12 + and x13, x15, x13 + adds x10, x14, x10 + str x10, [x0] + adcs x10, x16, x11 + adcs x8, x12, x8 + stp x10, x8, [x0, #8] + adcs x8, x13, x9 + str x8, [x0, #24] + ret +.Lfunc_end62: + .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L + + .globl mcl_fpDbl_add4L + .align 2 + .type mcl_fpDbl_add4L,@function +mcl_fpDbl_add4L: // @mcl_fpDbl_add4L +// BB#0: + ldp x8, x9, [x2, #48] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x16, x17, [x2, #16] + ldp x4, x2, [x2] + ldp x5, x6, [x1, #16] + ldp x18, x1, [x1] + adds x18, x4, x18 + str x18, [x0] + ldp x18, x4, [x3, #16] + adcs x1, x2, x1 + 
ldp x2, x3, [x3] + adcs x16, x16, x5 + stp x1, x16, [x0, #8] + adcs x16, x17, x6 + str x16, [x0, #24] + adcs x12, x12, x14 + adcs x13, x13, x15 + adcs x8, x8, x10 + adcs x9, x9, x11 + adcs x10, xzr, xzr + subs x11, x12, x2 + sbcs x14, x13, x3 + sbcs x15, x8, x18 + sbcs x16, x9, x4 + sbcs x10, x10, xzr + tst x10, #0x1 + csel x10, x12, x11, ne + csel x11, x13, x14, ne + csel x8, x8, x15, ne + csel x9, x9, x16, ne + stp x10, x11, [x0, #32] + stp x8, x9, [x0, #48] + ret +.Lfunc_end63: + .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L + + .globl mcl_fpDbl_sub4L + .align 2 + .type mcl_fpDbl_sub4L,@function +mcl_fpDbl_sub4L: // @mcl_fpDbl_sub4L +// BB#0: + ldp x8, x9, [x2, #48] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x16, x17, [x2, #16] + ldp x18, x2, [x2] + ldp x5, x6, [x1, #16] + ldp x4, x1, [x1] + subs x18, x4, x18 + str x18, [x0] + ldp x18, x4, [x3, #16] + sbcs x1, x1, x2 + ldp x2, x3, [x3] + sbcs x16, x5, x16 + stp x1, x16, [x0, #8] + sbcs x16, x6, x17 + sbcs x12, x14, x12 + sbcs x13, x15, x13 + sbcs x8, x10, x8 + sbcs x9, x11, x9 + ngcs x10, xzr + tst x10, #0x1 + csel x10, x4, xzr, ne + csel x11, x18, xzr, ne + csel x14, x3, xzr, ne + csel x15, x2, xzr, ne + adds x12, x15, x12 + stp x16, x12, [x0, #24] + adcs x12, x14, x13 + adcs x8, x11, x8 + stp x12, x8, [x0, #40] + adcs x8, x10, x9 + str x8, [x0, #56] + ret +.Lfunc_end64: + .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L + + .globl mcl_fp_mulUnitPre5L + .align 2 + .type mcl_fp_mulUnitPre5L,@function +mcl_fp_mulUnitPre5L: // @mcl_fp_mulUnitPre5L +// BB#0: + ldp x12, x8, [x1, #24] + ldp x9, x10, [x1] + ldr x11, [x1, #16] + mul x13, x9, x2 + mul x14, x10, x2 + umulh x9, x9, x2 + mul x15, x11, x2 + umulh x10, x10, x2 + mul x16, x12, x2 + umulh x11, x11, x2 + mul x17, x8, x2 + umulh x12, x12, x2 + umulh x8, x8, x2 + adds x9, x9, x14 + stp x13, x9, [x0] + adcs x9, x10, x15 + str x9, [x0, #16] + adcs x9, x11, x16 + str x9, [x0, #24] + adcs x9, x12, x17 + adcs x8, x8, xzr + stp x9, x8, [x0, #32] + ret +.Lfunc_end65: + .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L + + .globl mcl_fpDbl_mulPre5L + .align 2 + .type mcl_fpDbl_mulPre5L,@function +mcl_fpDbl_mulPre5L: // @mcl_fpDbl_mulPre5L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #176 // =176 + ldp x8, x10, [x1] + ldp x9, x15, [x1] + ldp x11, x12, [x1, #24] + ldp x13, x14, [x2] + ldp x16, x18, [x1, #16] + ldr x17, [x1, #16] + ldr x3, [x1, #32] + ldp x4, x5, [x2, #16] + mul x6, x8, x13 + str x6, [sp, #72] // 8-byte Folded Spill + umulh x6, x12, x13 + str x6, [sp, #168] // 8-byte Folded Spill + mul x6, x12, x13 + str x6, [sp, #152] // 8-byte Folded Spill + umulh x6, x11, x13 + str x6, [sp, #112] // 8-byte Folded Spill + mul x6, x11, x13 + str x6, [sp, #64] // 8-byte Folded Spill + umulh x6, x17, x13 + mul x23, x17, x13 + umulh x24, x10, x13 + mul x25, x10, x13 + umulh x7, x8, x13 + mul x26, x8, x14 + mul x13, x12, x14 + str x13, [sp, #104] // 8-byte Folded Spill + mul x13, x11, x14 + stp x13, x6, [sp, #40] + mul x29, x17, x14 + mul x30, x10, x14 + umulh x12, x12, x14 + umulh x11, x11, x14 + str x11, [sp, #96] // 8-byte Folded Spill + umulh x11, x17, x14 + umulh x27, x10, x14 + umulh x20, x8, x14 + mul x8, x9, x4 + stp x8, x11, [sp, #24] + mul x8, x3, x4 + stp x8, x12, [sp, #136] + mul x8, x18, x4 + str x8, [sp, #88] // 8-byte Folded Spill + mul x8, x16, x4 + str x8, [sp, #16] // 8-byte Folded Spill + mul x28, x15, x4 + umulh x8, x3, x4 + str x8, [sp, #160] // 8-byte Folded Spill + umulh x8, x18, x4 + str x8, [sp, #128] // 8-byte Folded Spill + umulh x8, x16, x4 + str x8, [sp, #80] // 8-byte Folded Spill + umulh x8, x15, x4 + str x8, [sp, #8] // 8-byte Folded Spill + umulh x22, x9, x4 + mul x8, x3, x5 + str x8, [sp, #120] // 8-byte Folded Spill + umulh x8, x3, x5 + str x8, [sp, #56] // 8-byte Folded Spill + mul x6, x18, x5 + umulh x21, x18, x5 + mul x3, x16, x5 + umulh x19, x16, x5 + mul x17, x15, x5 + umulh x4, x15, x5 + mul x16, x9, x5 + umulh x18, x9, x5 + ldr x2, [x2, #32] + ldp x10, x5, [x1, #16] + ldp x8, x9, [x1] + ldr x1, [x1, #32] + mul x15, x8, x2 + umulh x14, x8, x2 + mul x12, x9, x2 + umulh x13, x9, x2 + mul x11, x10, x2 + umulh x10, x10, x2 + mul x9, x5, x2 + umulh x5, x5, x2 + mul x8, x1, x2 + umulh x1, x1, x2 + ldr x2, [sp, #72] // 8-byte Folded Reload + str x2, [x0] + adds x2, x7, x25 + adcs x7, x24, x23 + ldr x23, [sp, #64] // 8-byte Folded Reload + ldr x24, [sp, #48] // 8-byte Folded Reload + adcs x23, x24, x23 + ldr x24, [sp, #152] // 8-byte Folded Reload + ldr x25, [sp, #112] // 8-byte Folded Reload + adcs x24, x25, x24 + ldr x25, [sp, #168] // 8-byte Folded Reload + adcs x25, x25, xzr + adds x2, x26, x2 + str x2, [x0, #8] + adcs x2, x30, x7 + adcs x7, x29, x23 + ldr x23, [sp, #40] // 8-byte Folded Reload + adcs x23, x23, x24 + ldr x24, [sp, #104] // 8-byte Folded Reload + adcs x24, x24, x25 + adcs x25, xzr, xzr + adds x2, x2, x20 + adcs x7, x7, x27 + ldr x20, [sp, #32] // 8-byte Folded Reload + adcs x20, x23, x20 + ldr x23, [sp, #96] // 8-byte Folded Reload + adcs x23, x24, x23 + ldr x24, [sp, #144] // 8-byte Folded Reload + adcs x24, x25, x24 + ldr x25, [sp, #24] // 8-byte Folded Reload + adds x2, x25, x2 + str x2, [x0, #16] + adcs x2, x28, x7 + ldr x7, [sp, #16] // 8-byte Folded Reload + adcs x7, x7, x20 + ldr x20, [sp, #88] // 8-byte Folded Reload + adcs x20, x20, x23 + ldr x23, [sp, #136] // 8-byte Folded Reload + adcs x23, x23, x24 + adcs x24, xzr, xzr + adds x2, x2, x22 + ldr x22, [sp, #8] // 8-byte Folded Reload + adcs x7, x7, x22 + ldr x22, [sp, #80] // 8-byte Folded Reload + adcs x20, x20, x22 + ldr x22, [sp, #128] // 8-byte Folded Reload + adcs x22, x23, x22 + ldr x23, [sp, #160] // 8-byte Folded 
Reload + adcs x23, x24, x23 + adds x16, x16, x2 + str x16, [x0, #24] + adcs x16, x17, x7 + adcs x17, x3, x20 + adcs x2, x6, x22 + ldr x3, [sp, #120] // 8-byte Folded Reload + adcs x3, x3, x23 + adcs x6, xzr, xzr + adds x16, x16, x18 + adcs x17, x17, x4 + adcs x18, x2, x19 + adcs x2, x3, x21 + ldr x3, [sp, #56] // 8-byte Folded Reload + adcs x3, x6, x3 + adds x15, x15, x16 + str x15, [x0, #32] + adcs x12, x12, x17 + adcs x11, x11, x18 + adcs x9, x9, x2 + adcs x8, x8, x3 + adcs x15, xzr, xzr + adds x12, x12, x14 + adcs x11, x11, x13 + stp x12, x11, [x0, #40] + adcs x9, x9, x10 + adcs x8, x8, x5 + stp x9, x8, [x0, #56] + adcs x8, x15, x1 + str x8, [x0, #72] + add sp, sp, #176 // =176 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end66: + .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L + + .globl mcl_fpDbl_sqrPre5L + .align 2 + .type mcl_fpDbl_sqrPre5L,@function +mcl_fpDbl_sqrPre5L: // @mcl_fpDbl_sqrPre5L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x1, #16] + ldp x12, x15, [x1] + ldp x13, x14, [x1, #24] + ldr x16, [x1, #16] + mul x17, x12, x12 + mul x18, x14, x12 + mul x2, x11, x12 + umulh x3, x16, x12 + mul x4, x16, x12 + umulh x5, x9, x12 + mul x6, x9, x12 + str x17, [x0] + umulh x17, x12, x12 + adds x17, x17, x6 + adcs x4, x5, x4 + adcs x2, x3, x2 + umulh x3, x11, x12 + adcs x18, x3, x18 + umulh x12, x14, x12 + adcs x12, x12, xzr + adds x17, x6, x17 + ldr x3, [x1] + str x17, [x0, #8] + mul x17, x9, x9 + adcs x17, x17, x4 + mul x4, x16, x9 + adcs x2, x4, x2 + mul x4, x11, x9 + adcs x18, x4, x18 + mul x4, x14, x9 + adcs x12, x4, x12 + adcs x4, xzr, xzr + adds x17, x17, x5 + umulh x5, x9, x9 + adcs x2, x2, x5 + umulh x5, x16, x9 + adcs x18, x18, x5 + ldr x5, [x1, #8] + umulh x11, x11, x9 + adcs x11, x12, x11 + ldr x12, [x1, #24] + umulh x9, x14, x9 + adcs x9, x4, x9 + mul x4, x3, x16 + adds x17, x4, x17 + mul x4, x14, x16 + str x17, [x0, #16] + mul x17, x5, x16 + adcs x17, x17, x2 + mul x2, x16, x16 + adcs x18, x2, x18 + mul x2, x12, x16 + adcs x11, x2, x11 + umulh x2, x3, x16 + adcs x9, x4, x9 + adcs x4, xzr, xzr + adds x17, x17, x2 + umulh x2, x5, x16 + adcs x18, x18, x2 + umulh x2, x16, x16 + adcs x11, x11, x2 + umulh x14, x14, x16 + umulh x16, x12, x16 + adcs x9, x9, x16 + ldr x16, [x1, #32] + adcs x14, x4, x14 + mul x1, x3, x12 + adds x17, x1, x17 + mul x1, x16, x12 + str x17, [x0, #24] + mul x17, x5, x12 + adcs x17, x17, x18 + mul x18, x10, x12 + adcs x11, x18, x11 + mul x18, x12, x12 + adcs x9, x18, x9 + umulh x18, x16, x12 + umulh x2, x3, x12 + adcs x14, x1, x14 + adcs x1, xzr, xzr + adds x17, x17, x2 + umulh x2, x10, x12 + umulh x3, x5, x12 + umulh x12, x12, x12 + adcs x11, x11, x3 + mul x3, x8, x16 + adcs x9, x9, x2 + mul x2, x13, x16 + adcs x12, x14, x12 + mul x14, x10, x16 + adcs x18, x1, x18 + mul x1, x15, x16 + adds x17, x17, x3 + mul x3, x16, x16 + umulh x8, x8, x16 + umulh x15, x15, x16 + umulh x10, x10, x16 + umulh x13, x13, x16 + umulh x16, x16, x16 + str x17, [x0, #32] + adcs x11, x11, x1 + adcs x9, x9, x14 + adcs x12, x12, x2 + adcs x14, x18, x3 + adcs x17, xzr, xzr + adds x8, x11, x8 + str x8, [x0, #40] + adcs x8, x9, x15 + str x8, [x0, #48] + adcs x8, x12, x10 + str x8, [x0, #56] + adcs x8, x14, x13 + str x8, [x0, #64] + adcs x8, x17, x16 + str x8, [x0, #72] + ret +.Lfunc_end67: + .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L + + .globl mcl_fp_mont5L + .align 2 + .type mcl_fp_mont5L,@function +mcl_fp_mont5L: // @mcl_fp_mont5L 
+// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #80 // =80 + str x0, [sp, #72] // 8-byte Folded Spill + ldp x16, x10, [x1, #24] + ldp x18, x0, [x1, #8] + ldr x17, [x1] + ldur x9, [x3, #-8] + str x9, [sp, #16] // 8-byte Folded Spill + ldp x11, x8, [x3, #24] + ldp x14, x12, [x3, #8] + ldr x13, [x3] + ldp x3, x1, [x2] + ldp x4, x5, [x2, #16] + ldr x2, [x2, #32] + umulh x6, x10, x3 + mul x7, x10, x3 + umulh x19, x16, x3 + mul x20, x16, x3 + umulh x21, x0, x3 + mul x22, x0, x3 + umulh x23, x18, x3 + mul x24, x18, x3 + umulh x25, x17, x3 + mul x3, x17, x3 + umulh x26, x1, x10 + mul x27, x1, x10 + umulh x28, x1, x16 + adds x24, x25, x24 + mul x25, x3, x9 + adcs x22, x23, x22 + mul x23, x25, x8 + mul x29, x25, x11 + mul x30, x25, x12 + adcs x20, x21, x20 + mul x21, x25, x14 + adcs x7, x19, x7 + umulh x19, x25, x13 + adcs x6, x6, xzr + adds x19, x19, x21 + umulh x21, x25, x14 + adcs x21, x21, x30 + umulh x30, x25, x12 + adcs x29, x30, x29 + umulh x30, x25, x11 + adcs x23, x30, x23 + umulh x30, x25, x8 + mul x25, x25, x13 + adcs x30, x30, xzr + cmn x25, x3 + mul x3, x1, x16 + umulh x25, x1, x0 + adcs x19, x19, x24 + mul x24, x1, x0 + adcs x21, x21, x22 + umulh x22, x1, x18 + adcs x20, x29, x20 + mul x29, x1, x18 + adcs x7, x23, x7 + umulh x23, x1, x17 + mul x1, x1, x17 + adcs x6, x30, x6 + adcs x30, xzr, xzr + adds x23, x23, x29 + umulh x29, x4, x10 + adcs x22, x22, x24 + mul x24, x4, x10 + adcs x3, x25, x3 + umulh x25, x4, x16 + adcs x27, x28, x27 + adcs x26, x26, xzr + adds x1, x19, x1 + adcs x19, x21, x23 + mul x21, x1, x9 + adcs x20, x20, x22 + mul x22, x21, x8 + mul x23, x21, x11 + mul x28, x21, x12 + adcs x3, x7, x3 + mul x7, x21, x14 + adcs x6, x6, x27 + umulh x27, x21, x13 + adcs x26, x30, x26 + adcs x30, xzr, xzr + adds x7, x27, x7 + umulh x27, x21, x14 + adcs x27, x27, x28 + umulh x28, x21, x12 + adcs x23, x28, x23 + umulh x28, x21, x11 + adcs x22, x28, x22 + umulh x28, x21, x8 + mul x21, x21, x13 + adcs x28, x28, xzr + cmn x21, x1 + mul x1, x4, x16 + umulh x21, x4, x0 + adcs x7, x7, x19 + mul x19, x4, x0 + adcs x20, x27, x20 + umulh x27, x4, x18 + adcs x3, x23, x3 + mul x23, x4, x18 + adcs x6, x22, x6 + umulh x22, x4, x17 + mul x4, x4, x17 + adcs x26, x28, x26 + umulh x15, x5, x10 + str x15, [sp, #64] // 8-byte Folded Spill + adcs x30, x30, xzr + adds x22, x22, x23 + mul x15, x5, x10 + str x15, [sp, #56] // 8-byte Folded Spill + adcs x19, x27, x19 + umulh x15, x5, x16 + str x15, [sp, #40] // 8-byte Folded Spill + adcs x1, x21, x1 + mul x15, x5, x16 + str x15, [sp, #32] // 8-byte Folded Spill + adcs x24, x25, x24 + adcs x25, x29, xzr + adds x4, x7, x4 + adcs x7, x20, x22 + mul x20, x4, x9 + adcs x3, x3, x19 + mul x19, x20, x8 + mul x22, x20, x11 + mov x15, x12 + mul x29, x20, x15 + adcs x1, x6, x1 + mov x21, x14 + mul x6, x20, x21 + adcs x24, x26, x24 + mov x9, x13 + umulh x26, x20, x9 + adcs x25, x30, x25 + adcs x30, xzr, xzr + adds x6, x26, x6 + umulh x26, x20, x21 + adcs x26, x26, x29 + umulh x29, x20, x15 + adcs x22, x29, x22 + umulh x29, x20, x11 + mov x13, x11 + adcs x19, x29, x19 + umulh x29, x20, x8 + mov x12, x8 + mul x20, x20, x9 + mov x14, x9 + adcs x29, x29, xzr + cmn x20, x4 + umulh x4, x5, x0 + mul x20, x5, x0 + umulh x11, x5, x18 + mul x9, x5, x18 + umulh x8, x5, x17 + mul x5, x5, x17 + umulh x23, x2, x10 + str x23, [sp, #48] // 8-byte Folded Spill + mul x10, x2, x10 + str x10, [sp, #24] // 8-byte Folded Spill + umulh x10, x2, x16 + str x10, 
[sp, #8] // 8-byte Folded Spill + mul x28, x2, x16 + umulh x27, x2, x0 + mul x23, x2, x0 + umulh x16, x2, x18 + mul x18, x2, x18 + umulh x0, x2, x17 + mul x17, x2, x17 + adcs x2, x6, x7 + adcs x3, x26, x3 + adcs x1, x22, x1 + adcs x6, x19, x24 + adcs x7, x29, x25 + adcs x19, x30, xzr + adds x8, x8, x9 + adcs x9, x11, x20 + ldr x10, [sp, #32] // 8-byte Folded Reload + adcs x10, x4, x10 + ldr x11, [sp, #56] // 8-byte Folded Reload + ldr x4, [sp, #40] // 8-byte Folded Reload + adcs x4, x4, x11 + ldr x11, [sp, #64] // 8-byte Folded Reload + adcs x20, x11, xzr + adds x2, x2, x5 + adcs x8, x3, x8 + ldr x24, [sp, #16] // 8-byte Folded Reload + mul x3, x2, x24 + adcs x9, x1, x9 + mul x1, x3, x12 + mul x5, x3, x13 + mul x22, x3, x15 + adcs x10, x6, x10 + mul x6, x3, x21 + adcs x4, x7, x4 + umulh x7, x3, x14 + adcs x19, x19, x20 + adcs x20, xzr, xzr + adds x6, x7, x6 + umulh x7, x3, x21 + adcs x7, x7, x22 + umulh x22, x3, x15 + mov x25, x15 + adcs x5, x22, x5 + umulh x22, x3, x13 + adcs x1, x22, x1 + umulh x22, x3, x12 + mul x3, x3, x14 + adcs x22, x22, xzr + cmn x3, x2 + adcs x8, x6, x8 + adcs x9, x7, x9 + adcs x10, x5, x10 + adcs x1, x1, x4 + adcs x2, x22, x19 + adcs x3, x20, xzr + adds x11, x0, x18 + adcs x15, x16, x23 + adcs x16, x27, x28 + ldr x18, [sp, #24] // 8-byte Folded Reload + ldr x0, [sp, #8] // 8-byte Folded Reload + adcs x18, x0, x18 + ldr x0, [sp, #48] // 8-byte Folded Reload + adcs x4, x0, xzr + adds x8, x8, x17 + adcs x9, x9, x11 + mul x11, x8, x24 + adcs x10, x10, x15 + umulh x15, x11, x12 + mul x17, x11, x12 + umulh x5, x11, x13 + mul x6, x11, x13 + mov x0, x13 + mov x20, x25 + umulh x7, x11, x20 + mul x19, x11, x20 + mov x23, x20 + mov x13, x21 + umulh x20, x11, x13 + mul x21, x11, x13 + umulh x22, x11, x14 + mul x11, x11, x14 + adcs x16, x1, x16 + adcs x18, x2, x18 + adcs x1, x3, x4 + adcs x2, xzr, xzr + adds x3, x22, x21 + adcs x4, x20, x19 + adcs x6, x7, x6 + adcs x17, x5, x17 + adcs x15, x15, xzr + cmn x11, x8 + adcs x8, x3, x9 + adcs x9, x4, x10 + adcs x10, x6, x16 + adcs x11, x17, x18 + adcs x15, x15, x1 + adcs x16, x2, xzr + subs x1, x8, x14 + sbcs x13, x9, x13 + sbcs x14, x10, x23 + sbcs x17, x11, x0 + sbcs x18, x15, x12 + sbcs x16, x16, xzr + tst x16, #0x1 + csel x8, x8, x1, ne + csel x9, x9, x13, ne + csel x10, x10, x14, ne + csel x11, x11, x17, ne + csel x12, x15, x18, ne + ldr x13, [sp, #72] // 8-byte Folded Reload + stp x8, x9, [x13] + stp x10, x11, [x13, #16] + str x12, [x13, #32] + add sp, sp, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end68: + .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L + + .globl mcl_fp_montNF5L + .align 2 + .type mcl_fp_montNF5L,@function +mcl_fp_montNF5L: // @mcl_fp_montNF5L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #32 // =32 + str x0, [sp, #24] // 8-byte Folded Spill + ldp x16, x14, [x1, #24] + ldp x18, x15, [x1, #8] + ldr x17, [x1] + ldur x13, [x3, #-8] + ldp x9, x8, [x3, #24] + ldp x11, x10, [x3, #8] + ldr x12, [x3] + ldp x1, x3, [x2] + ldp x4, x5, [x2, #16] + ldr x2, [x2, #32] + umulh x6, x14, x1 + mul x7, x14, x1 + umulh x19, x16, x1 + mul x20, x16, x1 + umulh x21, x15, x1 + mul x22, x15, x1 + umulh x23, x18, x1 + mul x24, x18, x1 + umulh x25, x17, x1 + mul x1, x17, x1 + umulh x26, x3, x14 + mul x27, x3, x14 + umulh x28, x3, x16 + mul x29, x3, x16 + umulh x30, x3, x15 + adds x24, x25, x24 + mul x25, x3, x15 + adcs x22, x23, x22 + umulh x23, x3, x18 + adcs x20, x21, x20 + mul x21, x1, x13 + adcs x7, x19, x7 + mul x19, x21, x12 + adcs x6, x6, xzr + cmn x19, x1 + mul x1, x3, x18 + mul x19, x21, x11 + adcs x19, x19, x24 + mul x24, x21, x10 + adcs x22, x24, x22 + mul x24, x21, x9 + adcs x20, x24, x20 + mul x24, x21, x8 + adcs x7, x24, x7 + umulh x24, x21, x12 + adcs x6, x6, xzr + adds x19, x19, x24 + umulh x24, x21, x11 + adcs x22, x22, x24 + umulh x24, x21, x10 + adcs x20, x20, x24 + umulh x24, x21, x9 + adcs x7, x7, x24 + umulh x24, x3, x17 + mul x3, x3, x17 + umulh x21, x21, x8 + adcs x6, x6, x21 + umulh x21, x4, x14 + adds x1, x24, x1 + mul x24, x4, x14 + adcs x23, x23, x25 + umulh x25, x4, x16 + adcs x29, x30, x29 + mul x30, x4, x16 + adcs x27, x28, x27 + umulh x28, x4, x15 + adcs x26, x26, xzr + adds x3, x3, x19 + mul x19, x4, x15 + adcs x1, x1, x22 + umulh x22, x4, x18 + adcs x20, x23, x20 + mul x23, x4, x18 + adcs x7, x29, x7 + mul x29, x3, x13 + adcs x6, x27, x6 + mul x27, x29, x12 + adcs x26, x26, xzr + cmn x27, x3 + umulh x3, x4, x17 + mul x4, x4, x17 + mul x27, x29, x11 + adcs x1, x27, x1 + mul x27, x29, x10 + adcs x20, x27, x20 + mul x27, x29, x9 + adcs x7, x27, x7 + mul x27, x29, x8 + adcs x6, x27, x6 + umulh x27, x29, x12 + adcs x26, x26, xzr + adds x1, x1, x27 + umulh x27, x29, x11 + adcs x20, x20, x27 + umulh x27, x29, x10 + adcs x7, x7, x27 + umulh x27, x29, x9 + adcs x6, x6, x27 + umulh x27, x5, x14 + umulh x29, x29, x8 + adcs x26, x26, x29 + mul x29, x5, x14 + adds x3, x3, x23 + umulh x23, x5, x16 + adcs x19, x22, x19 + mul x22, x5, x16 + adcs x28, x28, x30 + umulh x30, x5, x15 + adcs x24, x25, x24 + mul x25, x5, x15 + adcs x21, x21, xzr + adds x1, x4, x1 + umulh x4, x5, x18 + adcs x3, x3, x20 + mul x20, x5, x18 + adcs x7, x19, x7 + umulh x19, x5, x17 + mul x5, x5, x17 + adcs x6, x28, x6 + mul x28, x1, x13 + adcs x24, x24, x26 + mul x26, x28, x12 + adcs x21, x21, xzr + cmn x26, x1 + umulh x0, x2, x14 + mul x14, x2, x14 + stp x14, x0, [sp, #8] + umulh x26, x2, x16 + mul x1, x2, x16 + umulh x0, x2, x15 + mul x16, x2, x15 + umulh x15, x2, x18 + mul x18, x2, x18 + umulh x14, x2, x17 + mul x17, x2, x17 + mul x2, x28, x11 + adcs x2, x2, x3 + mul x3, x28, x10 + adcs x3, x3, x7 + mul x7, x28, x9 + adcs x6, x7, x6 + mul x7, x28, x8 + adcs x7, x7, x24 + adcs x21, x21, xzr + umulh x24, x28, x12 + adds x2, x2, x24 + umulh x24, x28, x11 + adcs x3, x3, x24 + umulh x24, x28, x10 + adcs x6, x6, x24 + umulh x24, x28, x9 + adcs x7, x7, x24 + umulh x24, x28, x8 + adcs x21, x21, x24 + adds x19, x19, x20 + adcs x4, x4, x25 + adcs x20, x30, x22 + adcs x22, x23, x29 + adcs x23, x27, xzr + adds x2, x5, x2 + adcs x3, x19, x3 + mov x24, x13 + mul x5, x2, x24 + adcs x4, x4, x6 + mul x6, x5, x8 + mul x19, x5, x9 + adcs x7, x20, x7 + mul x20, x5, x10 + adcs x21, x22, 
x21 + mul x22, x5, x12 + adcs x23, x23, xzr + cmn x22, x2 + mul x2, x5, x11 + umulh x22, x5, x8 + adcs x2, x2, x3 + umulh x3, x5, x9 + adcs x4, x20, x4 + umulh x20, x5, x10 + adcs x7, x19, x7 + umulh x19, x5, x11 + umulh x5, x5, x12 + adcs x6, x6, x21 + adcs x21, x23, xzr + adds x2, x2, x5 + adcs x4, x4, x19 + adcs x5, x7, x20 + adcs x3, x6, x3 + adcs x6, x21, x22 + adds x13, x14, x18 + adcs x14, x15, x16 + adcs x15, x0, x1 + ldp x16, x18, [sp, #8] + adcs x16, x26, x16 + adcs x18, x18, xzr + adds x17, x17, x2 + adcs x13, x13, x4 + mul x0, x17, x24 + adcs x14, x14, x5 + mul x1, x0, x8 + mul x2, x0, x9 + mul x4, x0, x10 + mul x5, x0, x11 + mul x7, x0, x12 + umulh x19, x0, x8 + umulh x20, x0, x9 + umulh x21, x0, x10 + umulh x22, x0, x11 + umulh x0, x0, x12 + adcs x15, x15, x3 + adcs x16, x16, x6 + adcs x18, x18, xzr + cmn x7, x17 + adcs x13, x5, x13 + adcs x14, x4, x14 + adcs x15, x2, x15 + adcs x16, x1, x16 + adcs x17, x18, xzr + adds x13, x13, x0 + adcs x14, x14, x22 + adcs x15, x15, x21 + adcs x16, x16, x20 + adcs x17, x17, x19 + subs x12, x13, x12 + sbcs x11, x14, x11 + sbcs x10, x15, x10 + sbcs x9, x16, x9 + sbcs x8, x17, x8 + asr x18, x8, #63 + cmp x18, #0 // =0 + csel x12, x13, x12, lt + csel x11, x14, x11, lt + csel x10, x15, x10, lt + csel x9, x16, x9, lt + csel x8, x17, x8, lt + ldr x13, [sp, #24] // 8-byte Folded Reload + stp x12, x11, [x13] + stp x10, x9, [x13, #16] + str x8, [x13, #32] + add sp, sp, #32 // =32 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end69: + .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L + + .globl mcl_fp_montRed5L + .align 2 + .type mcl_fp_montRed5L,@function +mcl_fp_montRed5L: // @mcl_fp_montRed5L +// BB#0: + stp x26, x25, [sp, #-64]! 
+ stp x24, x23, [sp, #16] + stp x22, x21, [sp, #32] + stp x20, x19, [sp, #48] + ldur x13, [x2, #-8] + ldp x9, x8, [x2, #24] + ldp x11, x10, [x2, #8] + ldr x12, [x2] + ldp x15, x16, [x1, #64] + ldp x17, x18, [x1, #48] + ldp x2, x3, [x1, #32] + ldp x4, x5, [x1, #16] + ldp x14, x1, [x1] + mul x6, x14, x13 + umulh x7, x6, x8 + mul x19, x6, x8 + umulh x20, x6, x9 + mul x21, x6, x9 + umulh x22, x6, x10 + mul x23, x6, x10 + umulh x24, x6, x11 + mul x25, x6, x11 + umulh x26, x6, x12 + mul x6, x6, x12 + adds x25, x26, x25 + adcs x23, x24, x23 + adcs x21, x22, x21 + adcs x19, x20, x19 + adcs x7, x7, xzr + cmn x14, x6 + adcs x14, x1, x25 + adcs x1, x4, x23 + mul x4, x14, x13 + adcs x5, x5, x21 + umulh x6, x4, x8 + mul x20, x4, x8 + umulh x21, x4, x9 + mul x22, x4, x9 + umulh x23, x4, x10 + mul x24, x4, x10 + umulh x25, x4, x11 + mul x26, x4, x11 + adcs x2, x2, x19 + umulh x19, x4, x12 + mul x4, x4, x12 + adcs x3, x3, x7 + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x15, x15, xzr + adcs x16, x16, xzr + adcs x7, xzr, xzr + adds x19, x19, x26 + adcs x24, x25, x24 + adcs x22, x23, x22 + adcs x20, x21, x20 + adcs x6, x6, xzr + cmn x4, x14 + adcs x14, x19, x1 + adcs x1, x24, x5 + mul x4, x14, x13 + adcs x2, x22, x2 + umulh x5, x4, x8 + mul x19, x4, x8 + umulh x21, x4, x9 + mul x22, x4, x9 + umulh x23, x4, x10 + mul x24, x4, x10 + umulh x25, x4, x11 + mul x26, x4, x11 + adcs x3, x20, x3 + umulh x20, x4, x12 + mul x4, x4, x12 + adcs x17, x6, x17 + adcs x18, x18, xzr + adcs x15, x15, xzr + adcs x16, x16, xzr + adcs x6, x7, xzr + adds x7, x20, x26 + adcs x20, x25, x24 + adcs x22, x23, x22 + adcs x19, x21, x19 + adcs x5, x5, xzr + cmn x4, x14 + adcs x14, x7, x1 + adcs x1, x20, x2 + mul x2, x14, x13 + adcs x3, x22, x3 + umulh x4, x2, x8 + mul x7, x2, x8 + umulh x20, x2, x9 + mul x21, x2, x9 + umulh x22, x2, x10 + mul x23, x2, x10 + umulh x24, x2, x11 + mul x25, x2, x11 + umulh x26, x2, x12 + mul x2, x2, x12 + adcs x17, x19, x17 + adcs x18, x5, x18 + adcs x15, x15, xzr + adcs x16, x16, xzr + adcs x5, x6, xzr + adds x6, x26, x25 + adcs x19, x24, x23 + adcs x21, x22, x21 + adcs x7, x20, x7 + adcs x4, x4, xzr + cmn x2, x14 + adcs x14, x6, x1 + adcs x1, x19, x3 + mul x13, x14, x13 + adcs x17, x21, x17 + umulh x2, x13, x8 + mul x3, x13, x8 + umulh x6, x13, x9 + mul x19, x13, x9 + umulh x20, x13, x10 + mul x21, x13, x10 + umulh x22, x13, x11 + mul x23, x13, x11 + umulh x24, x13, x12 + mul x13, x13, x12 + adcs x18, x7, x18 + adcs x15, x4, x15 + adcs x16, x16, xzr + adcs x4, x5, xzr + adds x5, x24, x23 + adcs x7, x22, x21 + adcs x19, x20, x19 + adcs x3, x6, x3 + adcs x2, x2, xzr + cmn x13, x14 + adcs x13, x5, x1 + adcs x14, x7, x17 + adcs x17, x19, x18 + adcs x15, x3, x15 + adcs x16, x2, x16 + adcs x18, x4, xzr + subs x12, x13, x12 + sbcs x11, x14, x11 + sbcs x10, x17, x10 + sbcs x9, x15, x9 + sbcs x8, x16, x8 + sbcs x18, x18, xzr + tst x18, #0x1 + csel x12, x13, x12, ne + csel x11, x14, x11, ne + csel x10, x17, x10, ne + csel x9, x15, x9, ne + csel x8, x16, x8, ne + stp x12, x11, [x0] + stp x10, x9, [x0, #16] + str x8, [x0, #32] + ldp x20, x19, [sp, #48] + ldp x22, x21, [sp, #32] + ldp x24, x23, [sp, #16] + ldp x26, x25, [sp], #64 + ret +.Lfunc_end70: + .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L + + .globl mcl_fp_addPre5L + .align 2 + .type mcl_fp_addPre5L,@function +mcl_fp_addPre5L: // @mcl_fp_addPre5L +// BB#0: + ldp x11, x8, [x2, #24] + ldp x17, x9, [x1, #24] + ldp x13, x10, [x2, #8] + ldr x12, [x2] + ldp x14, x15, [x1] + ldr x16, [x1, #16] + adds x12, x12, x14 + str x12, [x0] + adcs x12, x13, x15 + 
adcs x10, x10, x16 + stp x12, x10, [x0, #8] + adcs x10, x11, x17 + adcs x9, x8, x9 + adcs x8, xzr, xzr + stp x10, x9, [x0, #24] + mov x0, x8 + ret +.Lfunc_end71: + .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L + + .globl mcl_fp_subPre5L + .align 2 + .type mcl_fp_subPre5L,@function +mcl_fp_subPre5L: // @mcl_fp_subPre5L +// BB#0: + ldp x11, x8, [x2, #24] + ldp x17, x9, [x1, #24] + ldp x13, x10, [x2, #8] + ldr x12, [x2] + ldp x14, x15, [x1] + ldr x16, [x1, #16] + subs x12, x14, x12 + str x12, [x0] + sbcs x12, x15, x13 + sbcs x10, x16, x10 + stp x12, x10, [x0, #8] + sbcs x10, x17, x11 + sbcs x9, x9, x8 + ngcs x8, xzr + and x8, x8, #0x1 + stp x10, x9, [x0, #24] + mov x0, x8 + ret +.Lfunc_end72: + .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L + + .globl mcl_fp_shr1_5L + .align 2 + .type mcl_fp_shr1_5L,@function +mcl_fp_shr1_5L: // @mcl_fp_shr1_5L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x1, #16] + ldr x12, [x1, #32] + extr x8, x9, x8, #1 + extr x9, x10, x9, #1 + extr x10, x11, x10, #1 + extr x11, x12, x11, #1 + lsr x12, x12, #1 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + str x12, [x0, #32] + ret +.Lfunc_end73: + .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L + + .globl mcl_fp_add5L + .align 2 + .type mcl_fp_add5L,@function +mcl_fp_add5L: // @mcl_fp_add5L +// BB#0: + ldp x11, x8, [x2, #24] + ldp x17, x9, [x1, #24] + ldp x13, x10, [x2, #8] + ldr x12, [x2] + ldp x14, x15, [x1] + ldr x16, [x1, #16] + adds x12, x12, x14 + ldr x14, [x3, #32] + adcs x13, x13, x15 + adcs x10, x10, x16 + ldp x15, x16, [x3] + stp x12, x13, [x0] + adcs x17, x11, x17 + stp x10, x17, [x0, #16] + adcs x8, x8, x9 + str x8, [x0, #32] + adcs x18, xzr, xzr + ldp x9, x1, [x3, #16] + subs x12, x12, x15 + sbcs x11, x13, x16 + sbcs x10, x10, x9 + sbcs x9, x17, x1 + sbcs x8, x8, x14 + sbcs x13, x18, xzr + and w13, w13, #0x1 + tbnz w13, #0, .LBB74_2 +// BB#1: // %nocarry + stp x12, x11, [x0] + stp x10, x9, [x0, #16] + str x8, [x0, #32] +.LBB74_2: // %carry + ret +.Lfunc_end74: + .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L + + .globl mcl_fp_addNF5L + .align 2 + .type mcl_fp_addNF5L,@function +mcl_fp_addNF5L: // @mcl_fp_addNF5L +// BB#0: + ldp x11, x8, [x1, #24] + ldp x17, x9, [x2, #24] + ldp x13, x10, [x1, #8] + ldr x12, [x1] + ldp x14, x15, [x2] + ldr x16, [x2, #16] + adds x12, x14, x12 + ldp x18, x14, [x3, #24] + adcs x13, x15, x13 + adcs x10, x16, x10 + ldp x15, x16, [x3] + adcs x11, x17, x11 + ldr x17, [x3, #16] + adcs x8, x9, x8 + subs x9, x12, x15 + sbcs x15, x13, x16 + sbcs x16, x10, x17 + sbcs x17, x11, x18 + sbcs x14, x8, x14 + asr x18, x14, #63 + cmp x18, #0 // =0 + csel x9, x12, x9, lt + csel x12, x13, x15, lt + csel x10, x10, x16, lt + csel x11, x11, x17, lt + csel x8, x8, x14, lt + stp x9, x12, [x0] + stp x10, x11, [x0, #16] + str x8, [x0, #32] + ret +.Lfunc_end75: + .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L + + .globl mcl_fp_sub5L + .align 2 + .type mcl_fp_sub5L,@function +mcl_fp_sub5L: // @mcl_fp_sub5L +// BB#0: + ldp x11, x12, [x2, #24] + ldp x17, x13, [x1, #24] + ldp x9, x10, [x2, #8] + ldr x8, [x2] + ldp x14, x15, [x1] + ldr x16, [x1, #16] + subs x8, x14, x8 + sbcs x9, x15, x9 + stp x8, x9, [x0] + sbcs x10, x16, x10 + sbcs x11, x17, x11 + stp x10, x11, [x0, #16] + sbcs x12, x13, x12 + str x12, [x0, #32] + ngcs x13, xzr + and w13, w13, #0x1 + tbnz w13, #0, .LBB76_2 +// BB#1: // %nocarry + ret +.LBB76_2: // %carry + ldp x17, x13, [x3, #24] + ldp x14, x15, [x3] + ldr x16, [x3, #16] + adds x8, x14, x8 + adcs x9, x15, x9 + adcs x10, x16, x10 + adcs x11, x17, x11 + adcs x12, x13, x12 + stp 
x8, x9, [x0] + stp x10, x11, [x0, #16] + str x12, [x0, #32] + ret +.Lfunc_end76: + .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L + + .globl mcl_fp_subNF5L + .align 2 + .type mcl_fp_subNF5L,@function +mcl_fp_subNF5L: // @mcl_fp_subNF5L +// BB#0: + ldp x11, x8, [x2, #24] + ldp x17, x9, [x1, #24] + ldp x13, x10, [x2, #8] + ldr x12, [x2] + ldp x14, x15, [x1] + ldr x16, [x1, #16] + subs x12, x14, x12 + sbcs x13, x15, x13 + ldp x1, x14, [x3, #8] + ldp x15, x18, [x3, #24] + sbcs x10, x16, x10 + ldr x16, [x3] + sbcs x11, x17, x11 + sbcs x8, x9, x8 + asr x9, x8, #63 + extr x17, x9, x8, #63 + and x16, x17, x16 + and x14, x14, x9, ror #63 + and x15, x9, x15 + and x17, x9, x18 + ror x9, x9, #63 + and x9, x9, x1 + adds x12, x16, x12 + adcs x9, x9, x13 + stp x12, x9, [x0] + adcs x9, x14, x10 + str x9, [x0, #16] + adcs x9, x15, x11 + adcs x8, x17, x8 + stp x9, x8, [x0, #24] + ret +.Lfunc_end77: + .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L + + .globl mcl_fpDbl_add5L + .align 2 + .type mcl_fpDbl_add5L,@function +mcl_fpDbl_add5L: // @mcl_fpDbl_add5L +// BB#0: + stp x22, x21, [sp, #-32]! + stp x20, x19, [sp, #16] + ldp x8, x9, [x2, #64] + ldp x10, x11, [x1, #64] + ldp x12, x13, [x2, #48] + ldp x14, x15, [x1, #48] + ldp x16, x17, [x2, #32] + ldp x18, x4, [x1, #32] + ldp x5, x6, [x2, #16] + ldp x19, x2, [x2] + ldp x20, x21, [x1, #16] + ldp x7, x1, [x1] + adds x7, x19, x7 + ldr x19, [x3, #32] + str x7, [x0] + adcs x1, x2, x1 + ldp x2, x7, [x3, #16] + str x1, [x0, #8] + ldp x1, x3, [x3] + adcs x5, x5, x20 + str x5, [x0, #16] + adcs x5, x6, x21 + adcs x16, x16, x18 + stp x5, x16, [x0, #24] + adcs x16, x17, x4 + adcs x12, x12, x14 + adcs x13, x13, x15 + adcs x8, x8, x10 + adcs x9, x9, x11 + adcs x10, xzr, xzr + subs x11, x16, x1 + sbcs x14, x12, x3 + sbcs x15, x13, x2 + sbcs x17, x8, x7 + sbcs x18, x9, x19 + sbcs x10, x10, xzr + tst x10, #0x1 + csel x10, x16, x11, ne + csel x11, x12, x14, ne + csel x12, x13, x15, ne + csel x8, x8, x17, ne + csel x9, x9, x18, ne + stp x10, x11, [x0, #40] + stp x12, x8, [x0, #56] + str x9, [x0, #72] + ldp x20, x19, [sp, #16] + ldp x22, x21, [sp], #32 + ret +.Lfunc_end78: + .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L + + .globl mcl_fpDbl_sub5L + .align 2 + .type mcl_fpDbl_sub5L,@function +mcl_fpDbl_sub5L: // @mcl_fpDbl_sub5L +// BB#0: + stp x22, x21, [sp, #-32]! 
+ stp x20, x19, [sp, #16] + ldp x8, x9, [x2, #64] + ldp x10, x11, [x1, #64] + ldp x12, x13, [x2, #48] + ldp x14, x15, [x1, #48] + ldp x16, x17, [x2, #32] + ldp x18, x4, [x1, #32] + ldp x5, x6, [x2, #16] + ldp x7, x2, [x2] + ldp x20, x21, [x1, #16] + ldp x19, x1, [x1] + subs x7, x19, x7 + ldr x19, [x3, #32] + str x7, [x0] + sbcs x1, x1, x2 + ldp x2, x7, [x3, #16] + str x1, [x0, #8] + ldp x1, x3, [x3] + sbcs x5, x20, x5 + str x5, [x0, #16] + sbcs x5, x21, x6 + sbcs x16, x18, x16 + stp x5, x16, [x0, #24] + sbcs x16, x4, x17 + sbcs x12, x14, x12 + sbcs x13, x15, x13 + sbcs x8, x10, x8 + sbcs x9, x11, x9 + ngcs x10, xzr + tst x10, #0x1 + csel x10, x19, xzr, ne + csel x11, x7, xzr, ne + csel x14, x2, xzr, ne + csel x15, x3, xzr, ne + csel x17, x1, xzr, ne + adds x16, x17, x16 + adcs x12, x15, x12 + stp x16, x12, [x0, #40] + adcs x12, x14, x13 + adcs x8, x11, x8 + stp x12, x8, [x0, #56] + adcs x8, x10, x9 + str x8, [x0, #72] + ldp x20, x19, [sp, #16] + ldp x22, x21, [sp], #32 + ret +.Lfunc_end79: + .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L + + .globl mcl_fp_mulUnitPre6L + .align 2 + .type mcl_fp_mulUnitPre6L,@function +mcl_fp_mulUnitPre6L: // @mcl_fp_mulUnitPre6L +// BB#0: + ldp x8, x9, [x1, #32] + ldp x10, x11, [x1] + ldp x12, x13, [x1, #16] + mul x14, x10, x2 + mul x15, x11, x2 + umulh x10, x10, x2 + mul x16, x12, x2 + umulh x11, x11, x2 + mul x17, x13, x2 + umulh x12, x12, x2 + mul x18, x8, x2 + umulh x13, x13, x2 + mul x1, x9, x2 + umulh x8, x8, x2 + umulh x9, x9, x2 + adds x10, x10, x15 + stp x14, x10, [x0] + adcs x10, x11, x16 + str x10, [x0, #16] + adcs x10, x12, x17 + str x10, [x0, #24] + adcs x10, x13, x18 + adcs x8, x8, x1 + stp x10, x8, [x0, #32] + adcs x8, x9, xzr + str x8, [x0, #48] + ret +.Lfunc_end80: + .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L + + .globl mcl_fpDbl_mulPre6L + .align 2 + .type mcl_fpDbl_mulPre6L,@function +mcl_fpDbl_mulPre6L: // @mcl_fpDbl_mulPre6L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #400 // =400 + ldp x8, x9, [x1] + ldp x11, x13, [x1] + ldp x10, x17, [x1, #16] + ldp x12, x14, [x1, #32] + ldp x15, x16, [x2] + ldr x3, [x1, #32] + mul x30, x8, x15 + umulh x18, x14, x15 + str x18, [sp, #392] // 8-byte Folded Spill + mul x18, x14, x15 + str x18, [sp, #384] // 8-byte Folded Spill + umulh x18, x12, x15 + str x18, [sp, #376] // 8-byte Folded Spill + mul x18, x12, x15 + str x18, [sp, #360] // 8-byte Folded Spill + umulh x18, x17, x15 + str x18, [sp, #336] // 8-byte Folded Spill + mul x18, x17, x15 + str x18, [sp, #312] // 8-byte Folded Spill + umulh x18, x10, x15 + str x18, [sp, #304] // 8-byte Folded Spill + mul x18, x10, x15 + str x18, [sp, #272] // 8-byte Folded Spill + umulh x18, x9, x15 + str x18, [sp, #248] // 8-byte Folded Spill + mul x18, x9, x15 + umulh x15, x8, x15 + stp x15, x18, [sp, #216] + mul x15, x8, x16 + str x15, [sp, #280] // 8-byte Folded Spill + mul x15, x14, x16 + str x15, [sp, #352] // 8-byte Folded Spill + mul x15, x12, x16 + str x15, [sp, #328] // 8-byte Folded Spill + mul x15, x17, x16 + str x15, [sp, #296] // 8-byte Folded Spill + mul x15, x10, x16 + str x15, [sp, #264] // 8-byte Folded Spill + mul x15, x9, x16 + umulh x14, x14, x16 + str x14, [sp, #368] // 8-byte Folded Spill + umulh x12, x12, x16 + str x12, [sp, #344] // 8-byte Folded Spill + umulh x12, x17, x16 + str x12, [sp, #320] // 8-byte Folded Spill + umulh x10, x10, x16 + str x10, [sp, #288] // 8-byte Folded Spill + umulh x9, x9, x16 + str x9, [sp, #256] // 8-byte Folded Spill + umulh x8, x8, x16 + stp x8, x15, [sp, #232] + ldp x12, x8, [x2, #16] + ldr x9, [x1, #40] + ldp x15, x10, [x1, #16] + mul x14, x11, x12 + str x14, [sp, #144] // 8-byte Folded Spill + mul x14, x9, x12 + str x14, [sp, #200] // 8-byte Folded Spill + mul x14, x3, x12 + str x14, [sp, #176] // 8-byte Folded Spill + mul x14, x10, x12 + str x14, [sp, #160] // 8-byte Folded Spill + mul x14, x15, x12 + str x14, [sp, #128] // 8-byte Folded Spill + mul x14, x13, x12 + str x14, [sp, #112] // 8-byte Folded Spill + umulh x14, x9, x12 + str x14, [sp, #208] // 8-byte Folded Spill + umulh x14, x3, x12 + str x14, [sp, #192] // 8-byte Folded Spill + umulh x14, x10, x12 + str x14, [sp, #168] // 8-byte Folded Spill + umulh x14, x15, x12 + str x14, [sp, #152] // 8-byte Folded Spill + umulh x14, x13, x12 + str x14, [sp, #120] // 8-byte Folded Spill + umulh x12, x11, x12 + str x12, [sp, #104] // 8-byte Folded Spill + mul x12, x9, x8 + str x12, [sp, #184] // 8-byte Folded Spill + umulh x9, x9, x8 + str x9, [sp, #136] // 8-byte Folded Spill + mul x9, x3, x8 + str x9, [sp, #80] // 8-byte Folded Spill + umulh x9, x3, x8 + str x9, [sp, #96] // 8-byte Folded Spill + mul x9, x10, x8 + str x9, [sp, #64] // 8-byte Folded Spill + umulh x9, x10, x8 + str x9, [sp, #88] // 8-byte Folded Spill + mul x9, x15, x8 + str x9, [sp, #48] // 8-byte Folded Spill + umulh x9, x15, x8 + str x9, [sp, #72] // 8-byte Folded Spill + mul x9, x13, x8 + str x9, [sp, #32] // 8-byte Folded Spill + umulh x9, x13, x8 + str x9, [sp, #56] // 8-byte Folded Spill + mul x9, x11, x8 + str x9, [sp, #24] // 8-byte Folded Spill + umulh x8, x11, x8 + str x8, [sp, #40] // 8-byte Folded Spill + ldp x12, x13, [x1, #32] + ldp x9, x10, [x1] + ldp x11, x1, [x1, #16] + ldp x8, x2, [x2, #32] + mul x22, x9, x8 + mul x28, x13, x8 + mul x27, x12, x8 + mul x24, x1, x8 + mul x20, x11, x8 + mul x19, x10, x8 + umulh x14, x13, x8 + str x14, [sp, #16] // 8-byte 
Folded Spill + umulh x29, x12, x8 + umulh x26, x1, x8 + umulh x23, x11, x8 + umulh x21, x10, x8 + umulh x7, x9, x8 + mul x25, x9, x2 + umulh x6, x9, x2 + mul x4, x10, x2 + umulh x5, x10, x2 + mul x18, x11, x2 + umulh x3, x11, x2 + mul x16, x1, x2 + umulh x1, x1, x2 + mul x15, x12, x2 + umulh x17, x12, x2 + mul x14, x13, x2 + umulh x13, x13, x2 + str x30, [x0] + ldp x9, x8, [sp, #216] + adds x2, x9, x8 + ldp x8, x30, [sp, #272] + ldr x9, [sp, #248] // 8-byte Folded Reload + adcs x8, x9, x8 + ldp x10, x9, [sp, #304] + adcs x9, x10, x9 + ldr x10, [sp, #360] // 8-byte Folded Reload + ldr x11, [sp, #336] // 8-byte Folded Reload + adcs x10, x11, x10 + ldp x12, x11, [sp, #376] + adcs x11, x12, x11 + ldr x12, [sp, #392] // 8-byte Folded Reload + adcs x12, x12, xzr + adds x2, x30, x2 + str x2, [x0, #8] + ldp x30, x2, [sp, #232] + adcs x8, x2, x8 + ldr x2, [sp, #264] // 8-byte Folded Reload + adcs x9, x2, x9 + ldr x2, [sp, #296] // 8-byte Folded Reload + adcs x10, x2, x10 + ldr x2, [sp, #328] // 8-byte Folded Reload + adcs x11, x2, x11 + ldr x2, [sp, #352] // 8-byte Folded Reload + adcs x12, x2, x12 + adcs x2, xzr, xzr + adds x8, x8, x30 + ldr x30, [sp, #256] // 8-byte Folded Reload + adcs x9, x9, x30 + ldr x30, [sp, #288] // 8-byte Folded Reload + adcs x10, x10, x30 + ldr x30, [sp, #320] // 8-byte Folded Reload + adcs x11, x11, x30 + ldr x30, [sp, #344] // 8-byte Folded Reload + adcs x12, x12, x30 + ldr x30, [sp, #368] // 8-byte Folded Reload + adcs x2, x2, x30 + ldr x30, [sp, #144] // 8-byte Folded Reload + adds x8, x30, x8 + str x8, [x0, #16] + ldp x30, x8, [sp, #104] + adcs x8, x8, x9 + ldr x9, [sp, #128] // 8-byte Folded Reload + adcs x9, x9, x10 + ldr x10, [sp, #160] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #176] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #200] // 8-byte Folded Reload + adcs x12, x12, x2 + adcs x2, xzr, xzr + adds x8, x8, x30 + ldr x30, [sp, #120] // 8-byte Folded Reload + adcs x9, x9, x30 + ldr x30, [sp, #152] // 8-byte Folded Reload + adcs x10, x10, x30 + ldr x30, [sp, #168] // 8-byte Folded Reload + adcs x11, x11, x30 + ldr x30, [sp, #192] // 8-byte Folded Reload + adcs x12, x12, x30 + ldr x30, [sp, #208] // 8-byte Folded Reload + adcs x2, x2, x30 + ldr x30, [sp, #24] // 8-byte Folded Reload + adds x8, x30, x8 + str x8, [x0, #24] + ldp x8, x30, [sp, #32] + adcs x8, x8, x9 + ldr x9, [sp, #48] // 8-byte Folded Reload + adcs x9, x9, x10 + ldr x10, [sp, #64] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #80] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #184] // 8-byte Folded Reload + adcs x12, x12, x2 + adcs x2, xzr, xzr + adds x8, x8, x30 + ldr x30, [sp, #56] // 8-byte Folded Reload + adcs x9, x9, x30 + ldr x30, [sp, #72] // 8-byte Folded Reload + adcs x10, x10, x30 + ldr x30, [sp, #88] // 8-byte Folded Reload + adcs x11, x11, x30 + ldr x30, [sp, #96] // 8-byte Folded Reload + adcs x12, x12, x30 + ldr x30, [sp, #136] // 8-byte Folded Reload + adcs x2, x2, x30 + adds x8, x22, x8 + str x8, [x0, #32] + adcs x8, x19, x9 + adcs x9, x20, x10 + adcs x10, x24, x11 + adcs x11, x27, x12 + adcs x12, x28, x2 + adcs x2, xzr, xzr + adds x8, x8, x7 + adcs x9, x9, x21 + adcs x10, x10, x23 + adcs x11, x11, x26 + adcs x12, x12, x29 + ldr x7, [sp, #16] // 8-byte Folded Reload + adcs x2, x2, x7 + adds x8, x25, x8 + str x8, [x0, #40] + adcs x8, x4, x9 + adcs x9, x18, x10 + adcs x10, x16, x11 + adcs x11, x15, x12 + adcs x12, x14, x2 + adcs x14, xzr, xzr + adds x8, x8, x6 + str x8, [x0, #48] + adcs x8, x9, x5 + str x8, [x0, #56] + 
adcs x8, x10, x3 + str x8, [x0, #64] + adcs x8, x11, x1 + str x8, [x0, #72] + adcs x8, x12, x17 + str x8, [x0, #80] + adcs x8, x14, x13 + str x8, [x0, #88] + add sp, sp, #400 // =400 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end81: + .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L + + .globl mcl_fpDbl_sqrPre6L + .align 2 + .type mcl_fpDbl_sqrPre6L,@function +mcl_fpDbl_sqrPre6L: // @mcl_fpDbl_sqrPre6L +// BB#0: + stp x20, x19, [sp, #-16]! + ldp x8, x9, [x1, #8] + ldp x15, x10, [x1, #32] + ldp x11, x13, [x1] + ldr x12, [x1] + ldp x17, x14, [x1, #32] + ldr x16, [x1, #24] + mul x18, x11, x11 + umulh x2, x10, x11 + mul x3, x15, x11 + mul x4, x16, x11 + umulh x5, x9, x11 + mul x6, x9, x11 + umulh x7, x8, x11 + mul x19, x8, x11 + str x18, [x0] + umulh x18, x11, x11 + adds x18, x18, x19 + adcs x6, x7, x6 + adcs x4, x5, x4 + umulh x5, x16, x11 + adcs x3, x5, x3 + mul x5, x10, x11 + umulh x11, x15, x11 + adcs x11, x11, x5 + adcs x2, x2, xzr + adds x18, x19, x18 + ldp x5, x19, [x1, #16] + str x18, [x0, #8] + mul x18, x8, x8 + adcs x18, x18, x6 + mul x6, x9, x8 + adcs x4, x6, x4 + mul x6, x16, x8 + adcs x3, x6, x3 + mul x6, x15, x8 + adcs x11, x6, x11 + mul x6, x10, x8 + adcs x2, x6, x2 + adcs x6, xzr, xzr + adds x18, x18, x7 + ldr x7, [x1, #32] + umulh x10, x10, x8 + umulh x15, x15, x8 + umulh x16, x16, x8 + umulh x9, x9, x8 + umulh x8, x8, x8 + adcs x8, x4, x8 + adcs x9, x3, x9 + ldp x3, x4, [x1] + adcs x11, x11, x16 + mul x16, x12, x5 + adcs x15, x2, x15 + mul x2, x14, x5 + adcs x10, x6, x10 + mul x6, x7, x5 + adds x16, x16, x18 + mul x18, x19, x5 + str x16, [x0, #16] + mul x16, x13, x5 + adcs x8, x16, x8 + mul x16, x5, x5 + adcs x9, x16, x9 + umulh x16, x7, x5 + adcs x11, x18, x11 + adcs x15, x6, x15 + umulh x6, x12, x5 + adcs x10, x2, x10 + adcs x2, xzr, xzr + adds x8, x8, x6 + umulh x6, x13, x5 + adcs x9, x9, x6 + umulh x6, x5, x5 + adcs x11, x11, x6 + umulh x6, x19, x5 + adcs x15, x15, x6 + adcs x10, x10, x16 + umulh x5, x14, x5 + adcs x2, x2, x5 + mul x5, x12, x19 + adds x8, x5, x8 + ldp x16, x5, [x1, #16] + ldr x1, [x1, #40] + str x8, [x0, #24] + mul x8, x13, x19 + adcs x8, x8, x9 + mul x9, x14, x19 + adcs x11, x18, x11 + mul x18, x19, x19 + adcs x15, x18, x15 + mul x18, x7, x19 + umulh x14, x14, x19 + umulh x7, x7, x19 + umulh x13, x13, x19 + umulh x12, x12, x19 + umulh x19, x19, x19 + adcs x10, x18, x10 + mul x18, x3, x17 + adcs x9, x9, x2 + adcs x2, xzr, xzr + adds x8, x8, x12 + mul x12, x1, x17 + adcs x11, x11, x13 + mul x13, x5, x17 + adcs x15, x15, x6 + mul x6, x16, x17 + adcs x10, x10, x19 + mul x19, x4, x17 + adcs x9, x9, x7 + mul x7, x17, x17 + adcs x14, x2, x14 + umulh x2, x1, x17 + adds x8, x18, x8 + umulh x18, x5, x17 + str x8, [x0, #32] + umulh x8, x16, x17 + adcs x11, x19, x11 + umulh x19, x4, x17 + adcs x15, x6, x15 + umulh x6, x3, x17 + umulh x17, x17, x17 + adcs x10, x13, x10 + mul x13, x3, x1 + adcs x9, x7, x9 + adcs x14, x12, x14 + adcs x7, xzr, xzr + adds x11, x11, x6 + mul x6, x5, x1 + adcs x15, x15, x19 + mul x19, x16, x1 + adcs x8, x10, x8 + mul x10, x4, x1 + adcs x9, x9, x18 + mul x18, x1, x1 + umulh x3, x3, x1 + umulh x4, x4, x1 + umulh x16, x16, x1 + umulh x5, x5, x1 + umulh x1, x1, x1 + adcs x14, x14, x17 + adcs x17, x7, x2 + adds x11, x13, x11 + str x11, [x0, #40] + adcs x10, x10, x15 + adcs x8, x19, x8 + adcs x9, x6, x9 + adcs x11, x12, x14 + adcs x12, x18, x17 + adcs x13, xzr, xzr + adds x10, x10, x3 + adcs x8, x8, x4 + stp 
x10, x8, [x0, #48] + adcs x8, x9, x16 + str x8, [x0, #64] + adcs x8, x11, x5 + str x8, [x0, #72] + adcs x8, x12, x2 + str x8, [x0, #80] + adcs x8, x13, x1 + str x8, [x0, #88] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end82: + .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L + + .globl mcl_fp_mont6L + .align 2 + .type mcl_fp_mont6L,@function +mcl_fp_mont6L: // @mcl_fp_mont6L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #48 // =48 + str x0, [sp, #24] // 8-byte Folded Spill + ldr x5, [x2] + ldp x0, x4, [x1, #32] + ldp x16, x18, [x1, #16] + ldp x10, x1, [x1] + ldur x12, [x3, #-8] + str x12, [sp, #40] // 8-byte Folded Spill + ldp x11, x8, [x3, #32] + str x8, [sp, #32] // 8-byte Folded Spill + ldp x13, x17, [x3, #16] + ldp x14, x15, [x3] + ldr x3, [x2, #8] + umulh x6, x4, x5 + mul x7, x4, x5 + umulh x19, x0, x5 + mul x20, x0, x5 + umulh x21, x18, x5 + mul x22, x18, x5 + umulh x23, x16, x5 + mul x24, x16, x5 + umulh x25, x1, x5 + mul x26, x1, x5 + umulh x27, x10, x5 + mul x5, x10, x5 + umulh x28, x3, x4 + adds x26, x27, x26 + mul x27, x5, x12 + adcs x24, x25, x24 + mul x25, x27, x8 + mul x29, x27, x11 + mul x30, x27, x17 + adcs x22, x23, x22 + mul x23, x27, x13 + adcs x20, x21, x20 + mul x21, x27, x15 + adcs x7, x19, x7 + umulh x19, x27, x14 + adcs x6, x6, xzr + adds x19, x19, x21 + umulh x21, x27, x15 + adcs x21, x21, x23 + umulh x23, x27, x13 + adcs x23, x23, x30 + umulh x30, x27, x17 + adcs x29, x30, x29 + umulh x30, x27, x11 + adcs x25, x30, x25 + umulh x30, x27, x8 + mul x27, x27, x14 + adcs x30, x30, xzr + cmn x27, x5 + mul x5, x3, x4 + umulh x27, x3, x0 + adcs x19, x19, x26 + mul x26, x3, x0 + adcs x21, x21, x24 + mul x24, x3, x18 + adcs x22, x23, x22 + mul x23, x3, x16 + adcs x20, x29, x20 + mul x29, x3, x1 + adcs x7, x25, x7 + umulh x25, x3, x10 + adcs x30, x30, x6 + adcs x6, xzr, xzr + adds x25, x25, x29 + umulh x29, x3, x1 + adcs x23, x29, x23 + umulh x29, x3, x16 + adcs x24, x29, x24 + umulh x29, x3, x18 + mul x3, x3, x10 + adcs x26, x29, x26 + adcs x27, x27, x5 + adcs x29, x28, xzr + adds x3, x19, x3 + adcs x5, x21, x25 + mul x21, x3, x12 + adcs x28, x22, x23 + umulh x22, x21, x8 + mul x23, x21, x8 + mul x25, x21, x11 + mul x9, x21, x17 + adcs x19, x20, x24 + mul x8, x21, x13 + adcs x20, x7, x26 + mul x24, x21, x15 + adcs x30, x30, x27 + umulh x26, x21, x14 + adcs x6, x6, x29 + adcs x7, xzr, xzr + adds x24, x26, x24 + umulh x26, x21, x15 + adcs x29, x26, x8 + umulh x8, x21, x13 + adcs x26, x8, x9 + umulh x8, x21, x17 + adcs x27, x8, x25 + umulh x8, x21, x11 + mul x9, x21, x14 + adcs x8, x8, x23 + adcs x21, x22, xzr + cmn x9, x3 + ldp x23, x3, [x2, #16] + umulh x9, x23, x4 + adcs x5, x24, x5 + mul x22, x23, x4 + adcs x24, x29, x28 + mul x25, x23, x0 + adcs x19, x26, x19 + mul x26, x23, x18 + adcs x20, x27, x20 + mul x27, x23, x16 + adcs x8, x8, x30 + mul x28, x23, x1 + adcs x21, x21, x6 + umulh x6, x23, x10 + adcs x7, x7, xzr + adds x6, x6, x28 + umulh x28, x23, x1 + adcs x27, x28, x27 + umulh x28, x23, x16 + adcs x26, x28, x26 + umulh x28, x23, x18 + adcs x25, x28, x25 + umulh x28, x23, x0 + mul x23, x23, x10 + adcs x22, x28, x22 + adcs x9, x9, xzr + adds x23, x5, x23 + adcs x5, x24, x6 + mul x29, x23, x12 + adcs x6, x19, x27 + ldr x12, [sp, #32] // 8-byte Folded Reload + mul x28, x29, x12 + mul x27, x29, x11 + mul x30, x29, x17 + adcs x19, x20, x26 + mul x26, x29, x13 + adcs x20, x8, x25 + mul x8, x29, x15 + adcs x21, x21, x22 + umulh x24, 
x29, x14 + adcs x22, x7, x9 + adcs x7, xzr, xzr + adds x24, x24, x8 + umulh x8, x29, x15 + adcs x25, x8, x26 + umulh x8, x29, x13 + adcs x26, x8, x30 + umulh x8, x29, x17 + adcs x27, x8, x27 + umulh x8, x29, x11 + adcs x28, x8, x28 + umulh x8, x29, x12 + mul x9, x29, x14 + adcs x29, x8, xzr + cmn x9, x23 + ldp x23, x8, [x2, #32] + umulh x30, x3, x4 + adcs x2, x24, x5 + mul x5, x3, x4 + adcs x6, x25, x6 + mul x24, x3, x0 + adcs x19, x26, x19 + mul x25, x3, x18 + adcs x20, x27, x20 + mul x26, x3, x16 + adcs x21, x28, x21 + mul x27, x3, x1 + adcs x22, x29, x22 + mov x9, x10 + umulh x28, x3, x9 + adcs x7, x7, xzr + adds x27, x28, x27 + umulh x28, x3, x1 + adcs x26, x28, x26 + umulh x28, x3, x16 + adcs x25, x28, x25 + umulh x28, x3, x18 + adcs x24, x28, x24 + umulh x28, x3, x0 + mul x3, x3, x9 + adcs x5, x28, x5 + adcs x29, x30, xzr + adds x2, x2, x3 + adcs x3, x6, x27 + ldr x10, [sp, #40] // 8-byte Folded Reload + mul x6, x2, x10 + adcs x19, x19, x26 + mul x26, x6, x12 + mul x27, x6, x11 + mov x30, x17 + mul x28, x6, x30 + adcs x20, x20, x25 + mul x25, x6, x13 + adcs x21, x21, x24 + mov x17, x15 + mul x24, x6, x17 + adcs x5, x22, x5 + umulh x22, x6, x14 + adcs x29, x7, x29 + adcs x7, xzr, xzr + adds x22, x22, x24 + umulh x24, x6, x17 + adcs x24, x24, x25 + umulh x25, x6, x13 + mov x15, x13 + adcs x25, x25, x28 + umulh x28, x6, x30 + mov x13, x30 + adcs x27, x28, x27 + umulh x28, x6, x11 + adcs x26, x28, x26 + umulh x28, x6, x12 + mul x6, x6, x14 + adcs x28, x28, xzr + cmn x6, x2 + umulh x2, x23, x4 + mul x6, x23, x4 + adcs x3, x22, x3 + umulh x22, x23, x0 + adcs x19, x24, x19 + mul x24, x23, x0 + adcs x20, x25, x20 + mul x25, x23, x18 + adcs x21, x27, x21 + mul x27, x23, x16 + adcs x5, x26, x5 + mul x26, x23, x1 + adcs x29, x28, x29 + umulh x28, x23, x9 + adcs x7, x7, xzr + adds x26, x28, x26 + umulh x28, x23, x1 + adcs x27, x28, x27 + umulh x28, x23, x16 + adcs x25, x28, x25 + umulh x28, x23, x18 + mul x23, x23, x9 + adcs x24, x28, x24 + umulh x28, x8, x4 + str x28, [sp, #16] // 8-byte Folded Spill + mul x28, x8, x4 + adcs x6, x22, x6 + adcs x2, x2, xzr + adds x3, x3, x23 + adcs x19, x19, x26 + mul x22, x3, x10 + adcs x20, x20, x27 + mul x23, x22, x12 + mul x26, x22, x11 + mul x27, x22, x13 + adcs x21, x21, x25 + mul x25, x22, x15 + adcs x5, x5, x24 + mul x24, x22, x17 + adcs x4, x29, x6 + umulh x6, x22, x14 + adcs x2, x7, x2 + adcs x7, xzr, xzr + adds x6, x6, x24 + umulh x24, x22, x17 + adcs x24, x24, x25 + umulh x25, x22, x15 + adcs x25, x25, x27 + umulh x27, x22, x13 + adcs x26, x27, x26 + umulh x27, x22, x11 + adcs x23, x27, x23 + umulh x27, x22, x12 + mul x22, x22, x14 + adcs x27, x27, xzr + cmn x22, x3 + umulh x3, x8, x0 + mul x0, x8, x0 + umulh x22, x8, x18 + mul x18, x8, x18 + umulh x29, x8, x16 + mul x16, x8, x16 + umulh x30, x8, x1 + mul x1, x8, x1 + umulh x10, x8, x9 + mul x8, x8, x9 + adcs x6, x6, x19 + adcs x19, x24, x20 + adcs x20, x25, x21 + adcs x5, x26, x5 + adcs x9, x23, x4 + str x9, [sp, #8] // 8-byte Folded Spill + adcs x2, x27, x2 + adcs x7, x7, xzr + adds x9, x10, x1 + adcs x16, x30, x16 + adcs x18, x29, x18 + adcs x0, x22, x0 + adcs x1, x3, x28 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x3, x10, xzr + adds x8, x6, x8 + adcs x9, x19, x9 + ldr x10, [sp, #40] // 8-byte Folded Reload + mul x4, x8, x10 + adcs x16, x20, x16 + umulh x6, x4, x12 + mul x19, x4, x12 + mov x30, x11 + umulh x20, x4, x30 + mul x21, x4, x30 + umulh x22, x4, x13 + mul x23, x4, x13 + mov x29, x13 + umulh x24, x4, x15 + mul x25, x4, x15 + umulh x26, x4, x17 + mul x27, x4, x17 + umulh x28, x4, 
x14 + mul x4, x4, x14 + adcs x18, x5, x18 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x10, x10, x0 + adcs x0, x2, x1 + adcs x1, x7, x3 + adcs x2, xzr, xzr + adds x3, x28, x27 + adcs x5, x26, x25 + adcs x7, x24, x23 + adcs x21, x22, x21 + adcs x19, x20, x19 + adcs x6, x6, xzr + cmn x4, x8 + adcs x8, x3, x9 + adcs x9, x5, x16 + adcs x16, x7, x18 + adcs x10, x21, x10 + adcs x18, x19, x0 + adcs x0, x6, x1 + adcs x1, x2, xzr + subs x13, x8, x14 + sbcs x12, x9, x17 + sbcs x11, x16, x15 + sbcs x14, x10, x29 + sbcs x15, x18, x30 + ldr x17, [sp, #32] // 8-byte Folded Reload + sbcs x17, x0, x17 + sbcs x1, x1, xzr + tst x1, #0x1 + csel x8, x8, x13, ne + csel x9, x9, x12, ne + csel x11, x16, x11, ne + csel x10, x10, x14, ne + csel x12, x18, x15, ne + csel x13, x0, x17, ne + ldr x14, [sp, #24] // 8-byte Folded Reload + stp x8, x9, [x14] + stp x11, x10, [x14, #16] + stp x12, x13, [x14, #32] + add sp, sp, #48 // =48 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end83: + .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L + + .globl mcl_fp_montNF6L + .align 2 + .type mcl_fp_montNF6L,@function +mcl_fp_montNF6L: // @mcl_fp_montNF6L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #112 // =112 + str x0, [sp, #96] // 8-byte Folded Spill + ldp x16, x12, [x1, #32] + ldp x13, x11, [x1, #16] + ldp x17, x0, [x1] + ldur x18, [x3, #-8] + ldr x9, [x3, #32] + str x9, [sp, #104] // 8-byte Folded Spill + ldr x14, [x3, #40] + ldp x4, x10, [x3, #16] + ldr x15, [x3] + str x15, [sp, #8] // 8-byte Folded Spill + ldr x9, [x3, #8] + ldp x5, x3, [x2] + ldp x6, x7, [x2, #16] + ldp x19, x2, [x2, #32] + umulh x20, x12, x5 + mul x21, x12, x5 + umulh x22, x16, x5 + mul x23, x16, x5 + umulh x24, x11, x5 + mul x25, x11, x5 + mov x1, x13 + umulh x26, x1, x5 + mul x27, x1, x5 + mov x13, x0 + umulh x28, x13, x5 + mul x29, x13, x5 + mov x8, x17 + umulh x30, x8, x5 + mul x5, x8, x5 + adds x29, x30, x29 + mul x30, x3, x12 + adcs x27, x28, x27 + mul x28, x3, x16 + adcs x25, x26, x25 + mul x26, x3, x11 + adcs x23, x24, x23 + mul x24, x5, x18 + adcs x21, x22, x21 + mul x22, x24, x15 + adcs x20, x20, xzr + cmn x22, x5 + mul x5, x3, x1 + mov x0, x9 + mul x22, x24, x0 + adcs x22, x22, x29 + mul x29, x24, x4 + adcs x17, x29, x27 + mul x29, x24, x10 + adcs x25, x29, x25 + ldr x9, [sp, #104] // 8-byte Folded Reload + mul x29, x24, x9 + adcs x23, x29, x23 + mul x29, x24, x14 + adcs x21, x29, x21 + umulh x29, x24, x15 + adcs x20, x20, xzr + adds x22, x22, x29 + umulh x29, x24, x0 + adcs x15, x17, x29 + umulh x29, x24, x4 + mov x17, x4 + adcs x25, x25, x29 + umulh x29, x24, x10 + adcs x23, x23, x29 + umulh x29, x24, x9 + adcs x21, x21, x29 + mul x29, x3, x13 + umulh x24, x24, x14 + adcs x20, x20, x24 + umulh x24, x3, x8 + adds x24, x24, x29 + umulh x29, x3, x13 + adcs x5, x29, x5 + umulh x29, x3, x1 + adcs x26, x29, x26 + umulh x29, x3, x11 + adcs x28, x29, x28 + umulh x29, x3, x16 + adcs x29, x29, x30 + umulh x30, x3, x12 + mul x3, x3, x8 + adcs x30, x30, xzr + adds x3, x3, x22 + umulh x22, x6, x12 + adcs x24, x24, x15 + mul x27, x6, x12 + adcs x5, x5, x25 + mul x25, x6, x16 + adcs x23, x26, x23 + mul x26, x6, x11 + adcs x21, x28, x21 + mul x28, x3, x18 + mov x4, x18 + adcs x20, x29, x20 + ldr x18, [sp, #8] // 8-byte Folded Reload + mul x29, x28, x18 + adcs x30, x30, xzr + cmn x29, x3 + mul x3, 
x6, x1 + mul x29, x28, x0 + adcs x24, x29, x24 + mul x29, x28, x17 + adcs x5, x29, x5 + mul x29, x28, x10 + adcs x23, x29, x23 + mul x29, x28, x9 + adcs x21, x29, x21 + mul x29, x28, x14 + adcs x20, x29, x20 + umulh x29, x28, x18 + adcs x30, x30, xzr + adds x24, x24, x29 + umulh x29, x28, x0 + adcs x5, x5, x29 + umulh x29, x28, x17 + adcs x23, x23, x29 + umulh x29, x28, x10 + adcs x21, x21, x29 + umulh x29, x28, x9 + adcs x20, x20, x29 + mul x29, x6, x13 + umulh x28, x28, x14 + adcs x28, x30, x28 + umulh x30, x6, x8 + adds x29, x30, x29 + umulh x30, x6, x13 + adcs x3, x30, x3 + umulh x30, x6, x1 + adcs x26, x30, x26 + umulh x30, x6, x11 + adcs x25, x30, x25 + umulh x30, x6, x16 + mul x6, x6, x8 + adcs x27, x30, x27 + umulh x30, x7, x12 + adcs x22, x22, xzr + adds x6, x6, x24 + mul x24, x7, x12 + adcs x5, x29, x5 + umulh x29, x7, x16 + adcs x3, x3, x23 + mul x23, x7, x16 + adcs x21, x26, x21 + mul x26, x7, x11 + adcs x20, x25, x20 + mul x25, x6, x4 + adcs x27, x27, x28 + mul x28, x25, x18 + adcs x22, x22, xzr + cmn x28, x6 + mul x6, x7, x1 + mul x28, x25, x0 + adcs x5, x28, x5 + mul x28, x25, x17 + adcs x3, x28, x3 + mul x28, x25, x10 + adcs x21, x28, x21 + mul x28, x25, x9 + adcs x20, x28, x20 + mul x28, x25, x14 + adcs x27, x28, x27 + umulh x28, x25, x18 + adcs x22, x22, xzr + adds x5, x5, x28 + umulh x28, x25, x0 + adcs x3, x3, x28 + umulh x28, x25, x17 + adcs x21, x21, x28 + umulh x28, x25, x10 + adcs x20, x20, x28 + umulh x28, x25, x9 + adcs x27, x27, x28 + mul x28, x7, x13 + umulh x25, x25, x14 + adcs x22, x22, x25 + umulh x25, x7, x8 + adds x25, x25, x28 + umulh x28, x7, x13 + adcs x6, x28, x6 + umulh x28, x7, x1 + adcs x26, x28, x26 + umulh x28, x7, x11 + mul x7, x7, x8 + adcs x23, x28, x23 + umulh x9, x19, x12 + str x9, [sp, #16] // 8-byte Folded Spill + adcs x24, x29, x24 + mul x9, x19, x12 + str x9, [sp, #32] // 8-byte Folded Spill + adcs x30, x30, xzr + adds x5, x7, x5 + umulh x7, x19, x16 + adcs x3, x25, x3 + mul x25, x19, x16 + adcs x6, x6, x21 + umulh x21, x19, x11 + adcs x20, x26, x20 + mul x26, x19, x11 + adcs x23, x23, x27 + mul x27, x5, x4 + adcs x22, x24, x22 + mul x24, x27, x18 + adcs x30, x30, xzr + cmn x24, x5 + mov x28, x1 + mul x5, x19, x28 + mul x24, x19, x13 + umulh x1, x19, x8 + umulh x9, x19, x13 + umulh x15, x19, x28 + mul x19, x19, x8 + umulh x29, x2, x12 + str x29, [sp, #88] // 8-byte Folded Spill + mul x29, x2, x12 + umulh x12, x2, x16 + str x12, [sp, #80] // 8-byte Folded Spill + mul x12, x2, x16 + str x12, [sp, #72] // 8-byte Folded Spill + umulh x12, x2, x11 + mul x11, x2, x11 + stp x11, x12, [sp, #56] + umulh x11, x2, x28 + str x11, [sp, #48] // 8-byte Folded Spill + mul x11, x2, x28 + str x11, [sp, #40] // 8-byte Folded Spill + umulh x11, x2, x13 + str x11, [sp, #24] // 8-byte Folded Spill + mul x13, x2, x13 + umulh x16, x2, x8 + mul x28, x2, x8 + mul x2, x27, x0 + adcs x2, x2, x3 + mul x3, x27, x17 + adcs x3, x3, x6 + mul x6, x27, x10 + adcs x6, x6, x20 + ldr x8, [sp, #104] // 8-byte Folded Reload + mul x20, x27, x8 + adcs x20, x20, x23 + mul x23, x27, x14 + adcs x22, x23, x22 + adcs x23, x30, xzr + umulh x30, x27, x18 + adds x2, x2, x30 + umulh x30, x27, x0 + adcs x3, x3, x30 + umulh x30, x27, x17 + mov x12, x17 + adcs x6, x6, x30 + umulh x30, x27, x10 + adcs x20, x20, x30 + umulh x30, x27, x8 + mov x11, x8 + adcs x22, x22, x30 + mov x30, x14 + umulh x27, x27, x30 + adcs x23, x23, x27 + adds x8, x1, x24 + adcs x9, x9, x5 + adcs x14, x15, x26 + adcs x5, x21, x25 + ldr x15, [sp, #32] // 8-byte Folded Reload + adcs x7, x7, x15 + ldr x15, [sp, #16] // 
8-byte Folded Reload + adcs x21, x15, xzr + adds x2, x19, x2 + adcs x8, x8, x3 + adcs x9, x9, x6 + mov x24, x4 + mul x3, x2, x24 + adcs x14, x14, x20 + mul x6, x3, x30 + adcs x5, x5, x22 + mul x19, x3, x11 + adcs x7, x7, x23 + mul x20, x3, x18 + adcs x21, x21, xzr + cmn x20, x2 + mul x2, x3, x10 + mul x20, x3, x0 + adcs x8, x20, x8 + mul x20, x3, x12 + adcs x9, x20, x9 + umulh x20, x3, x30 + adcs x14, x2, x14 + umulh x2, x3, x11 + mov x27, x11 + adcs x5, x19, x5 + mov x11, x10 + umulh x19, x3, x11 + adcs x6, x6, x7 + umulh x7, x3, x18 + adcs x21, x21, xzr + adds x8, x8, x7 + umulh x7, x3, x12 + umulh x3, x3, x0 + adcs x9, x9, x3 + adcs x10, x14, x7 + adcs x3, x5, x19 + adcs x2, x6, x2 + adcs x5, x21, x20 + adds x15, x16, x13 + ldr x13, [sp, #40] // 8-byte Folded Reload + ldr x14, [sp, #24] // 8-byte Folded Reload + adcs x16, x14, x13 + ldp x14, x13, [sp, #48] + adcs x17, x14, x13 + ldp x14, x13, [sp, #64] + adcs x1, x14, x13 + ldr x13, [sp, #80] // 8-byte Folded Reload + adcs x4, x13, x29 + ldr x13, [sp, #88] // 8-byte Folded Reload + adcs x6, x13, xzr + adds x8, x28, x8 + adcs x9, x15, x9 + mul x15, x8, x24 + adcs x10, x16, x10 + mul x16, x15, x30 + mul x14, x15, x27 + mul x7, x15, x11 + mul x19, x15, x12 + mul x20, x15, x0 + mul x21, x15, x18 + umulh x22, x15, x30 + umulh x23, x15, x27 + umulh x24, x15, x11 + mov x28, x11 + umulh x25, x15, x12 + umulh x26, x15, x0 + umulh x15, x15, x18 + adcs x17, x17, x3 + adcs x1, x1, x2 + adcs x2, x4, x5 + adcs x3, x6, xzr + cmn x21, x8 + adcs x8, x20, x9 + adcs x9, x19, x10 + adcs x10, x7, x17 + adcs x17, x14, x1 + adcs x16, x16, x2 + adcs x11, x3, xzr + adds x8, x8, x15 + adcs x9, x9, x26 + adcs x10, x10, x25 + adcs x15, x17, x24 + adcs x16, x16, x23 + adcs x17, x11, x22 + subs x3, x8, x18 + sbcs x2, x9, x0 + sbcs x11, x10, x12 + sbcs x14, x15, x28 + sbcs x18, x16, x27 + sbcs x0, x17, x30 + asr x1, x0, #63 + cmp x1, #0 // =0 + csel x8, x8, x3, lt + csel x9, x9, x2, lt + csel x10, x10, x11, lt + csel x11, x15, x14, lt + csel x12, x16, x18, lt + csel x13, x17, x0, lt + ldr x14, [sp, #96] // 8-byte Folded Reload + stp x8, x9, [x14] + stp x10, x11, [x14, #16] + stp x12, x13, [x14, #32] + add sp, sp, #112 // =112 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end84: + .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L + + .globl mcl_fp_montRed6L + .align 2 + .type mcl_fp_montRed6L,@function +mcl_fp_montRed6L: // @mcl_fp_montRed6L +// BB#0: + stp x26, x25, [sp, #-64]! 
+ stp x24, x23, [sp, #16] + stp x22, x21, [sp, #32] + stp x20, x19, [sp, #48] + ldur x14, [x2, #-8] + ldp x9, x8, [x2, #32] + ldp x11, x10, [x2, #16] + ldp x13, x12, [x2] + ldp x16, x17, [x1, #80] + ldp x18, x2, [x1, #64] + ldp x3, x4, [x1, #48] + ldp x5, x6, [x1, #32] + ldp x7, x19, [x1, #16] + ldp x15, x1, [x1] + mul x20, x15, x14 + mul x21, x20, x8 + mul x22, x20, x9 + mul x23, x20, x10 + mul x24, x20, x11 + mul x25, x20, x12 + umulh x26, x20, x13 + adds x25, x26, x25 + umulh x26, x20, x12 + adcs x24, x26, x24 + umulh x26, x20, x11 + adcs x23, x26, x23 + umulh x26, x20, x10 + adcs x22, x26, x22 + umulh x26, x20, x9 + adcs x21, x26, x21 + umulh x26, x20, x8 + mul x20, x20, x13 + adcs x26, x26, xzr + cmn x15, x20 + adcs x15, x1, x25 + adcs x1, x7, x24 + mul x7, x15, x14 + adcs x19, x19, x23 + mul x20, x7, x8 + mul x23, x7, x9 + mul x24, x7, x10 + mul x25, x7, x11 + adcs x5, x5, x22 + mul x22, x7, x12 + adcs x6, x6, x21 + umulh x21, x7, x13 + adcs x3, x3, x26 + adcs x4, x4, xzr + adcs x18, x18, xzr + adcs x2, x2, xzr + adcs x16, x16, xzr + adcs x17, x17, xzr + adcs x26, xzr, xzr + adds x21, x21, x22 + umulh x22, x7, x12 + adcs x22, x22, x25 + umulh x25, x7, x11 + adcs x24, x25, x24 + umulh x25, x7, x10 + adcs x23, x25, x23 + umulh x25, x7, x9 + adcs x20, x25, x20 + umulh x25, x7, x8 + mul x7, x7, x13 + adcs x25, x25, xzr + cmn x7, x15 + adcs x15, x21, x1 + adcs x1, x22, x19 + mul x7, x15, x14 + adcs x5, x24, x5 + mul x19, x7, x8 + mul x21, x7, x9 + mul x22, x7, x10 + adcs x6, x23, x6 + mul x23, x7, x11 + adcs x3, x20, x3 + mul x20, x7, x12 + adcs x4, x25, x4 + umulh x24, x7, x13 + adcs x18, x18, xzr + adcs x2, x2, xzr + adcs x16, x16, xzr + adcs x17, x17, xzr + adcs x25, x26, xzr + adds x20, x24, x20 + umulh x24, x7, x12 + adcs x23, x24, x23 + umulh x24, x7, x11 + adcs x22, x24, x22 + umulh x24, x7, x10 + adcs x21, x24, x21 + umulh x24, x7, x9 + adcs x19, x24, x19 + umulh x24, x7, x8 + mul x7, x7, x13 + adcs x24, x24, xzr + cmn x7, x15 + adcs x15, x20, x1 + adcs x1, x23, x5 + mul x5, x15, x14 + adcs x6, x22, x6 + mul x7, x5, x8 + mul x20, x5, x9 + mul x22, x5, x10 + adcs x3, x21, x3 + mul x21, x5, x11 + adcs x4, x19, x4 + mul x19, x5, x12 + adcs x18, x24, x18 + umulh x23, x5, x13 + adcs x2, x2, xzr + adcs x16, x16, xzr + adcs x17, x17, xzr + adcs x24, x25, xzr + adds x19, x23, x19 + umulh x23, x5, x12 + adcs x21, x23, x21 + umulh x23, x5, x11 + adcs x22, x23, x22 + umulh x23, x5, x10 + adcs x20, x23, x20 + umulh x23, x5, x9 + adcs x7, x23, x7 + umulh x23, x5, x8 + mul x5, x5, x13 + adcs x23, x23, xzr + cmn x5, x15 + adcs x15, x19, x1 + adcs x1, x21, x6 + mul x5, x15, x14 + adcs x3, x22, x3 + mul x6, x5, x8 + mul x19, x5, x9 + mul x21, x5, x10 + adcs x4, x20, x4 + mul x20, x5, x11 + adcs x18, x7, x18 + mul x7, x5, x12 + adcs x2, x23, x2 + umulh x22, x5, x13 + adcs x16, x16, xzr + adcs x17, x17, xzr + adcs x23, x24, xzr + adds x7, x22, x7 + umulh x22, x5, x12 + adcs x20, x22, x20 + umulh x22, x5, x11 + adcs x21, x22, x21 + umulh x22, x5, x10 + adcs x19, x22, x19 + umulh x22, x5, x9 + adcs x6, x22, x6 + umulh x22, x5, x8 + mul x5, x5, x13 + adcs x22, x22, xzr + cmn x5, x15 + adcs x15, x7, x1 + adcs x1, x20, x3 + mul x14, x15, x14 + adcs x3, x21, x4 + mul x4, x14, x8 + mul x5, x14, x9 + mul x7, x14, x10 + adcs x18, x19, x18 + mul x19, x14, x11 + adcs x2, x6, x2 + mul x6, x14, x12 + adcs x16, x22, x16 + umulh x20, x14, x13 + adcs x17, x17, xzr + adcs x21, x23, xzr + adds x6, x20, x6 + umulh x20, x14, x12 + adcs x19, x20, x19 + umulh x20, x14, x11 + adcs x7, x20, x7 + umulh x20, x14, x10 + adcs 
x5, x20, x5 + umulh x20, x14, x9 + adcs x4, x20, x4 + umulh x20, x14, x8 + mul x14, x14, x13 + adcs x20, x20, xzr + cmn x14, x15 + adcs x14, x6, x1 + adcs x15, x19, x3 + adcs x18, x7, x18 + adcs x1, x5, x2 + adcs x16, x4, x16 + adcs x17, x20, x17 + adcs x2, x21, xzr + subs x13, x14, x13 + sbcs x12, x15, x12 + sbcs x11, x18, x11 + sbcs x10, x1, x10 + sbcs x9, x16, x9 + sbcs x8, x17, x8 + sbcs x2, x2, xzr + tst x2, #0x1 + csel x13, x14, x13, ne + csel x12, x15, x12, ne + csel x11, x18, x11, ne + csel x10, x1, x10, ne + csel x9, x16, x9, ne + csel x8, x17, x8, ne + stp x13, x12, [x0] + stp x11, x10, [x0, #16] + stp x9, x8, [x0, #32] + ldp x20, x19, [sp, #48] + ldp x22, x21, [sp, #32] + ldp x24, x23, [sp, #16] + ldp x26, x25, [sp], #64 + ret +.Lfunc_end85: + .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L + + .globl mcl_fp_addPre6L + .align 2 + .type mcl_fp_addPre6L,@function +mcl_fp_addPre6L: // @mcl_fp_addPre6L +// BB#0: + ldp x8, x9, [x2, #32] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x2, #16] + ldp x14, x15, [x2] + ldp x16, x17, [x1] + ldp x18, x1, [x1, #16] + adds x14, x14, x16 + str x14, [x0] + adcs x14, x15, x17 + adcs x12, x12, x18 + stp x14, x12, [x0, #8] + adcs x12, x13, x1 + adcs x8, x8, x10 + stp x12, x8, [x0, #24] + adcs x9, x9, x11 + adcs x8, xzr, xzr + str x9, [x0, #40] + mov x0, x8 + ret +.Lfunc_end86: + .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L + + .globl mcl_fp_subPre6L + .align 2 + .type mcl_fp_subPre6L,@function +mcl_fp_subPre6L: // @mcl_fp_subPre6L +// BB#0: + ldp x8, x9, [x2, #32] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x2, #16] + ldp x14, x15, [x2] + ldp x16, x17, [x1] + ldp x18, x1, [x1, #16] + subs x14, x16, x14 + str x14, [x0] + sbcs x14, x17, x15 + sbcs x12, x18, x12 + stp x14, x12, [x0, #8] + sbcs x12, x1, x13 + sbcs x8, x10, x8 + stp x12, x8, [x0, #24] + sbcs x9, x11, x9 + ngcs x8, xzr + and x8, x8, #0x1 + str x9, [x0, #40] + mov x0, x8 + ret +.Lfunc_end87: + .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L + + .globl mcl_fp_shr1_6L + .align 2 + .type mcl_fp_shr1_6L,@function +mcl_fp_shr1_6L: // @mcl_fp_shr1_6L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x1, #16] + ldp x12, x13, [x1, #32] + extr x8, x9, x8, #1 + extr x9, x10, x9, #1 + extr x10, x11, x10, #1 + extr x11, x12, x11, #1 + extr x12, x13, x12, #1 + lsr x13, x13, #1 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + ret +.Lfunc_end88: + .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L + + .globl mcl_fp_add6L + .align 2 + .type mcl_fp_add6L,@function +mcl_fp_add6L: // @mcl_fp_add6L +// BB#0: + ldp x8, x9, [x2, #32] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x2, #16] + ldp x14, x15, [x2] + ldp x16, x17, [x1] + ldp x18, x1, [x1, #16] + adds x14, x14, x16 + adcs x15, x15, x17 + ldp x16, x17, [x3, #32] + adcs x18, x12, x18 + adcs x1, x13, x1 + ldp x12, x2, [x3] + stp x14, x15, [x0] + stp x18, x1, [x0, #16] + adcs x8, x8, x10 + adcs x4, x9, x11 + stp x8, x4, [x0, #32] + adcs x5, xzr, xzr + ldp x9, x10, [x3, #16] + subs x13, x14, x12 + sbcs x12, x15, x2 + sbcs x11, x18, x9 + sbcs x10, x1, x10 + sbcs x9, x8, x16 + sbcs x8, x4, x17 + sbcs x14, x5, xzr + and w14, w14, #0x1 + tbnz w14, #0, .LBB89_2 +// BB#1: // %nocarry + stp x13, x12, [x0] + stp x11, x10, [x0, #16] + stp x9, x8, [x0, #32] +.LBB89_2: // %carry + ret +.Lfunc_end89: + .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L + + .globl mcl_fp_addNF6L + .align 2 + .type mcl_fp_addNF6L,@function +mcl_fp_addNF6L: // @mcl_fp_addNF6L +// BB#0: + ldp x8, x9, [x1, #32] + ldp x10, x11, [x2, #32] + ldp x12, x13, [x1, #16] + 
ldp x14, x15, [x1] + ldp x16, x17, [x2] + ldp x18, x1, [x2, #16] + adds x14, x16, x14 + adcs x15, x17, x15 + ldp x16, x17, [x3, #32] + adcs x12, x18, x12 + adcs x13, x1, x13 + ldp x18, x1, [x3] + adcs x8, x10, x8 + ldp x10, x2, [x3, #16] + adcs x9, x11, x9 + subs x11, x14, x18 + sbcs x18, x15, x1 + sbcs x10, x12, x10 + sbcs x1, x13, x2 + sbcs x16, x8, x16 + sbcs x17, x9, x17 + asr x2, x17, #63 + cmp x2, #0 // =0 + csel x11, x14, x11, lt + csel x14, x15, x18, lt + csel x10, x12, x10, lt + csel x12, x13, x1, lt + csel x8, x8, x16, lt + csel x9, x9, x17, lt + stp x11, x14, [x0] + stp x10, x12, [x0, #16] + stp x8, x9, [x0, #32] + ret +.Lfunc_end90: + .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L + + .globl mcl_fp_sub6L + .align 2 + .type mcl_fp_sub6L,@function +mcl_fp_sub6L: // @mcl_fp_sub6L +// BB#0: + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x10, x11, [x2, #16] + ldp x8, x9, [x2] + ldp x16, x17, [x1] + ldp x18, x1, [x1, #16] + subs x8, x16, x8 + sbcs x9, x17, x9 + stp x8, x9, [x0] + sbcs x10, x18, x10 + sbcs x11, x1, x11 + stp x10, x11, [x0, #16] + sbcs x12, x14, x12 + sbcs x13, x15, x13 + stp x12, x13, [x0, #32] + ngcs x14, xzr + and w14, w14, #0x1 + tbnz w14, #0, .LBB91_2 +// BB#1: // %nocarry + ret +.LBB91_2: // %carry + ldp x14, x15, [x3, #32] + ldp x16, x17, [x3] + ldp x18, x1, [x3, #16] + adds x8, x16, x8 + adcs x9, x17, x9 + adcs x10, x18, x10 + adcs x11, x1, x11 + adcs x12, x14, x12 + adcs x13, x15, x13 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + ret +.Lfunc_end91: + .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L + + .globl mcl_fp_subNF6L + .align 2 + .type mcl_fp_subNF6L,@function +mcl_fp_subNF6L: // @mcl_fp_subNF6L +// BB#0: + ldp x8, x9, [x2, #32] + ldp x10, x11, [x1, #32] + ldp x12, x13, [x2, #16] + ldp x14, x18, [x2] + ldp x16, x17, [x1, #16] + ldp x15, x1, [x1] + subs x14, x15, x14 + ldp x15, x2, [x3, #32] + sbcs x18, x1, x18 + sbcs x12, x16, x12 + ldp x16, x1, [x3, #16] + sbcs x13, x17, x13 + ldp x17, x3, [x3] + sbcs x8, x10, x8 + sbcs x9, x11, x9 + asr x10, x9, #63 + adds x11, x10, x10 + and x16, x10, x16 + and x1, x10, x1 + and x15, x10, x15 + and x2, x10, x2 + adcs x10, x10, x10 + orr x11, x11, x9, lsr #63 + and x11, x11, x17 + and x10, x10, x3 + adds x11, x11, x14 + adcs x10, x10, x18 + stp x11, x10, [x0] + adcs x10, x16, x12 + str x10, [x0, #16] + adcs x10, x1, x13 + adcs x8, x15, x8 + stp x10, x8, [x0, #24] + adcs x8, x2, x9 + str x8, [x0, #40] + ret +.Lfunc_end92: + .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L + + .globl mcl_fpDbl_add6L + .align 2 + .type mcl_fpDbl_add6L,@function +mcl_fpDbl_add6L: // @mcl_fpDbl_add6L +// BB#0: + stp x26, x25, [sp, #-64]! 
+ stp x24, x23, [sp, #16] + stp x22, x21, [sp, #32] + stp x20, x19, [sp, #48] + ldp x8, x9, [x2, #80] + ldp x10, x11, [x1, #80] + ldp x12, x13, [x2, #64] + ldp x14, x15, [x1, #64] + ldp x16, x17, [x2, #48] + ldp x18, x4, [x1, #48] + ldp x5, x6, [x2, #32] + ldp x7, x19, [x1, #32] + ldp x20, x21, [x2, #16] + ldp x23, x2, [x2] + ldp x24, x25, [x1, #16] + ldp x22, x1, [x1] + adds x22, x23, x22 + str x22, [x0] + ldp x22, x23, [x3, #32] + adcs x1, x2, x1 + str x1, [x0, #8] + ldp x1, x2, [x3, #16] + adcs x20, x20, x24 + ldp x24, x3, [x3] + str x20, [x0, #16] + adcs x20, x21, x25 + adcs x5, x5, x7 + stp x20, x5, [x0, #24] + adcs x5, x6, x19 + str x5, [x0, #40] + adcs x16, x16, x18 + adcs x17, x17, x4 + adcs x12, x12, x14 + adcs x13, x13, x15 + adcs x8, x8, x10 + adcs x9, x9, x11 + adcs x10, xzr, xzr + subs x11, x16, x24 + sbcs x14, x17, x3 + sbcs x15, x12, x1 + sbcs x18, x13, x2 + sbcs x1, x8, x22 + sbcs x2, x9, x23 + sbcs x10, x10, xzr + tst x10, #0x1 + csel x10, x16, x11, ne + csel x11, x17, x14, ne + csel x12, x12, x15, ne + csel x13, x13, x18, ne + csel x8, x8, x1, ne + csel x9, x9, x2, ne + stp x10, x11, [x0, #48] + stp x12, x13, [x0, #64] + stp x8, x9, [x0, #80] + ldp x20, x19, [sp, #48] + ldp x22, x21, [sp, #32] + ldp x24, x23, [sp, #16] + ldp x26, x25, [sp], #64 + ret +.Lfunc_end93: + .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L + + .globl mcl_fpDbl_sub6L + .align 2 + .type mcl_fpDbl_sub6L,@function +mcl_fpDbl_sub6L: // @mcl_fpDbl_sub6L +// BB#0: + stp x26, x25, [sp, #-64]! + stp x24, x23, [sp, #16] + stp x22, x21, [sp, #32] + stp x20, x19, [sp, #48] + ldp x8, x9, [x2, #80] + ldp x10, x11, [x1, #80] + ldp x12, x13, [x2, #64] + ldp x14, x15, [x1, #64] + ldp x16, x17, [x2, #48] + ldp x18, x4, [x1, #48] + ldp x5, x6, [x2, #32] + ldp x7, x19, [x1, #32] + ldp x20, x21, [x2, #16] + ldp x22, x2, [x2] + ldp x24, x25, [x1, #16] + ldp x23, x1, [x1] + subs x22, x23, x22 + str x22, [x0] + ldp x22, x23, [x3, #32] + sbcs x1, x1, x2 + str x1, [x0, #8] + ldp x1, x2, [x3, #16] + sbcs x20, x24, x20 + ldp x24, x3, [x3] + str x20, [x0, #16] + sbcs x20, x25, x21 + sbcs x5, x7, x5 + stp x20, x5, [x0, #24] + sbcs x5, x19, x6 + sbcs x16, x18, x16 + sbcs x17, x4, x17 + sbcs x12, x14, x12 + sbcs x13, x15, x13 + sbcs x8, x10, x8 + sbcs x9, x11, x9 + ngcs x10, xzr + tst x10, #0x1 + csel x10, x23, xzr, ne + csel x11, x22, xzr, ne + csel x14, x2, xzr, ne + csel x15, x1, xzr, ne + csel x18, x3, xzr, ne + csel x1, x24, xzr, ne + adds x16, x1, x16 + stp x5, x16, [x0, #40] + adcs x16, x18, x17 + adcs x12, x15, x12 + stp x16, x12, [x0, #56] + adcs x12, x14, x13 + adcs x8, x11, x8 + stp x12, x8, [x0, #72] + adcs x8, x10, x9 + str x8, [x0, #88] + ldp x20, x19, [sp, #48] + ldp x22, x21, [sp, #32] + ldp x24, x23, [sp, #16] + ldp x26, x25, [sp], #64 + ret +.Lfunc_end94: + .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L + + .globl mcl_fp_mulUnitPre7L + .align 2 + .type mcl_fp_mulUnitPre7L,@function +mcl_fp_mulUnitPre7L: // @mcl_fp_mulUnitPre7L +// BB#0: + ldp x10, x8, [x1, #40] + ldp x14, x9, [x1, #24] + ldp x11, x12, [x1] + ldr x13, [x1, #16] + mul x15, x11, x2 + mul x16, x12, x2 + umulh x11, x11, x2 + mul x17, x13, x2 + umulh x12, x12, x2 + mul x18, x14, x2 + umulh x13, x13, x2 + mul x1, x9, x2 + umulh x14, x14, x2 + mul x3, x10, x2 + umulh x9, x9, x2 + mul x4, x8, x2 + umulh x10, x10, x2 + umulh x8, x8, x2 + adds x11, x11, x16 + stp x15, x11, [x0] + adcs x11, x12, x17 + str x11, [x0, #16] + adcs x11, x13, x18 + str x11, [x0, #24] + adcs x11, x14, x1 + adcs x9, x9, x3 + stp x11, x9, [x0, #32] + adcs x9, x10, x4 + 
adcs x8, x8, xzr + stp x9, x8, [x0, #48] + ret +.Lfunc_end95: + .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L + + .globl mcl_fpDbl_mulPre7L + .align 2 + .type mcl_fpDbl_mulPre7L,@function +mcl_fpDbl_mulPre7L: // @mcl_fpDbl_mulPre7L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #624 // =624 + ldp x8, x9, [x1] + ldp x10, x11, [x1, #24] + ldp x12, x13, [x1, #40] + ldp x14, x15, [x2] + ldp x16, x18, [x1, #16] + mul x17, x8, x14 + str x17, [sp, #528] // 8-byte Folded Spill + umulh x17, x13, x14 + str x17, [sp, #616] // 8-byte Folded Spill + mul x17, x13, x14 + str x17, [sp, #608] // 8-byte Folded Spill + umulh x17, x12, x14 + str x17, [sp, #592] // 8-byte Folded Spill + mul x17, x12, x14 + str x17, [sp, #568] // 8-byte Folded Spill + umulh x17, x11, x14 + str x17, [sp, #552] // 8-byte Folded Spill + mul x17, x11, x14 + str x17, [sp, #512] // 8-byte Folded Spill + umulh x17, x10, x14 + str x17, [sp, #496] // 8-byte Folded Spill + mul x17, x10, x14 + str x17, [sp, #456] // 8-byte Folded Spill + umulh x17, x16, x14 + str x17, [sp, #424] // 8-byte Folded Spill + mul x17, x16, x14 + str x17, [sp, #368] // 8-byte Folded Spill + umulh x17, x9, x14 + str x17, [sp, #352] // 8-byte Folded Spill + mul x17, x9, x14 + str x17, [sp, #304] // 8-byte Folded Spill + umulh x14, x8, x14 + str x14, [sp, #272] // 8-byte Folded Spill + mul x14, x13, x15 + str x14, [sp, #560] // 8-byte Folded Spill + mul x14, x12, x15 + str x14, [sp, #520] // 8-byte Folded Spill + mul x14, x11, x15 + str x14, [sp, #488] // 8-byte Folded Spill + mul x14, x10, x15 + str x14, [sp, #448] // 8-byte Folded Spill + mul x14, x16, x15 + umulh x13, x13, x15 + str x13, [sp, #600] // 8-byte Folded Spill + umulh x12, x12, x15 + str x12, [sp, #576] // 8-byte Folded Spill + umulh x11, x11, x15 + str x11, [sp, #544] // 8-byte Folded Spill + umulh x10, x10, x15 + str x10, [sp, #504] // 8-byte Folded Spill + umulh x10, x16, x15 + str x10, [sp, #472] // 8-byte Folded Spill + mul x10, x9, x15 + str x10, [sp, #208] // 8-byte Folded Spill + umulh x9, x9, x15 + stp x9, x14, [sp, #400] + mul x9, x8, x15 + str x9, [sp, #96] // 8-byte Folded Spill + umulh x8, x8, x15 + str x8, [sp, #320] // 8-byte Folded Spill + ldp x9, x11, [x1] + ldp x10, x17, [x2, #16] + ldp x12, x13, [x1, #16] + ldp x14, x16, [x1, #32] + ldr x15, [x1, #48] + mul x8, x9, x10 + str x8, [sp, #248] // 8-byte Folded Spill + mul x8, x15, x10 + str x8, [sp, #392] // 8-byte Folded Spill + mul x8, x16, x10 + str x8, [sp, #344] // 8-byte Folded Spill + mul x8, x14, x10 + str x8, [sp, #296] // 8-byte Folded Spill + mul x8, x13, x10 + str x8, [sp, #240] // 8-byte Folded Spill + mul x8, x12, x10 + str x8, [sp, #192] // 8-byte Folded Spill + mul x8, x11, x10 + str x8, [sp, #136] // 8-byte Folded Spill + umulh x8, x15, x10 + str x8, [sp, #440] // 8-byte Folded Spill + umulh x8, x16, x10 + str x8, [sp, #384] // 8-byte Folded Spill + umulh x8, x14, x10 + str x8, [sp, #336] // 8-byte Folded Spill + umulh x8, x13, x10 + str x8, [sp, #288] // 8-byte Folded Spill + umulh x8, x12, x10 + str x8, [sp, #232] // 8-byte Folded Spill + umulh x8, x11, x10 + str x8, [sp, #184] // 8-byte Folded Spill + umulh x8, x9, x10 + str x8, [sp, #128] // 8-byte Folded Spill + mul x8, x15, x17 + str x8, [sp, #464] // 8-byte Folded Spill + umulh x8, x15, x17 + str x8, [sp, #584] // 8-byte Folded Spill + mul x8, x16, x17 + str x8, [sp, #376] // 8-byte Folded Spill + umulh x8, x16, x17 
+ str x8, [sp, #536] // 8-byte Folded Spill + mul x8, x14, x17 + str x8, [sp, #312] // 8-byte Folded Spill + umulh x8, x14, x17 + str x8, [sp, #480] // 8-byte Folded Spill + mul x8, x13, x17 + str x8, [sp, #224] // 8-byte Folded Spill + umulh x8, x13, x17 + str x8, [sp, #416] // 8-byte Folded Spill + mul x8, x12, x17 + str x8, [sp, #144] // 8-byte Folded Spill + umulh x8, x12, x17 + str x8, [sp, #328] // 8-byte Folded Spill + mul x8, x11, x17 + str x8, [sp, #80] // 8-byte Folded Spill + umulh x8, x11, x17 + str x8, [sp, #264] // 8-byte Folded Spill + mul x28, x9, x17 + umulh x8, x9, x17 + str x8, [sp, #176] // 8-byte Folded Spill + ldp x14, x12, [x1, #24] + ldp x10, x9, [x1] + ldr x7, [x1, #16] + ldp x30, x5, [x1, #40] + ldp x27, x8, [x2, #32] + ldr x13, [x1, #48] + mul x11, x10, x27 + str x11, [sp, #48] // 8-byte Folded Spill + mul x11, x5, x27 + str x11, [sp, #168] // 8-byte Folded Spill + mul x11, x30, x27 + str x11, [sp, #120] // 8-byte Folded Spill + mul x11, x12, x27 + str x11, [sp, #72] // 8-byte Folded Spill + mul x11, x14, x27 + str x11, [sp, #40] // 8-byte Folded Spill + mul x11, x7, x27 + str x11, [sp, #16] // 8-byte Folded Spill + mul x24, x9, x27 + umulh x11, x5, x27 + str x11, [sp, #216] // 8-byte Folded Spill + umulh x11, x30, x27 + str x11, [sp, #160] // 8-byte Folded Spill + umulh x11, x12, x27 + str x11, [sp, #112] // 8-byte Folded Spill + umulh x11, x14, x27 + str x11, [sp, #64] // 8-byte Folded Spill + umulh x11, x7, x27 + str x11, [sp, #32] // 8-byte Folded Spill + umulh x29, x9, x27 + umulh x23, x10, x27 + mul x11, x5, x8 + str x11, [sp, #256] // 8-byte Folded Spill + umulh x11, x5, x8 + str x11, [sp, #432] // 8-byte Folded Spill + mul x11, x30, x8 + str x11, [sp, #152] // 8-byte Folded Spill + umulh x11, x30, x8 + str x11, [sp, #360] // 8-byte Folded Spill + mul x11, x12, x8 + str x11, [sp, #88] // 8-byte Folded Spill + umulh x11, x12, x8 + str x11, [sp, #280] // 8-byte Folded Spill + mul x11, x14, x8 + str x11, [sp, #24] // 8-byte Folded Spill + umulh x11, x14, x8 + str x11, [sp, #200] // 8-byte Folded Spill + mul x25, x7, x8 + umulh x11, x7, x8 + str x11, [sp, #104] // 8-byte Folded Spill + mul x22, x9, x8 + umulh x9, x9, x8 + str x9, [sp, #56] // 8-byte Folded Spill + mul x20, x10, x8 + umulh x26, x10, x8 + ldr x10, [x2, #48] + ldp x2, x8, [x1] + ldr x9, [x1, #16] + ldp x11, x1, [x1, #32] + mul x27, x2, x10 + umulh x21, x2, x10 + mul x5, x8, x10 + umulh x19, x8, x10 + mul x3, x9, x10 + umulh x7, x9, x10 + mul x2, x18, x10 + umulh x6, x18, x10 + mul x17, x11, x10 + umulh x4, x11, x10 + mul x16, x1, x10 + umulh x1, x1, x10 + mul x15, x13, x10 + umulh x18, x13, x10 + ldr x8, [sp, #528] // 8-byte Folded Reload + str x8, [x0] + ldr x8, [sp, #304] // 8-byte Folded Reload + ldr x9, [sp, #272] // 8-byte Folded Reload + adds x13, x9, x8 + ldr x8, [sp, #368] // 8-byte Folded Reload + ldr x9, [sp, #352] // 8-byte Folded Reload + adcs x8, x9, x8 + ldr x9, [sp, #456] // 8-byte Folded Reload + ldr x10, [sp, #424] // 8-byte Folded Reload + adcs x9, x10, x9 + ldr x10, [sp, #512] // 8-byte Folded Reload + ldr x11, [sp, #496] // 8-byte Folded Reload + adcs x10, x11, x10 + ldr x11, [sp, #568] // 8-byte Folded Reload + ldr x12, [sp, #552] // 8-byte Folded Reload + adcs x11, x12, x11 + ldr x12, [sp, #608] // 8-byte Folded Reload + ldr x14, [sp, #592] // 8-byte Folded Reload + adcs x12, x14, x12 + ldr x14, [sp, #616] // 8-byte Folded Reload + adcs x14, x14, xzr + ldr x30, [sp, #96] // 8-byte Folded Reload + adds x13, x30, x13 + str x13, [x0, #8] + ldr x13, [sp, #208] // 8-byte Folded 
Reload + adcs x8, x13, x8 + ldr x13, [sp, #408] // 8-byte Folded Reload + adcs x9, x13, x9 + ldr x13, [sp, #448] // 8-byte Folded Reload + adcs x10, x13, x10 + ldr x13, [sp, #488] // 8-byte Folded Reload + adcs x11, x13, x11 + ldr x13, [sp, #520] // 8-byte Folded Reload + adcs x12, x13, x12 + ldr x13, [sp, #560] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, xzr, xzr + ldr x30, [sp, #320] // 8-byte Folded Reload + adds x8, x8, x30 + ldr x30, [sp, #400] // 8-byte Folded Reload + adcs x9, x9, x30 + ldr x30, [sp, #472] // 8-byte Folded Reload + adcs x10, x10, x30 + ldr x30, [sp, #504] // 8-byte Folded Reload + adcs x11, x11, x30 + ldr x30, [sp, #544] // 8-byte Folded Reload + adcs x12, x12, x30 + ldr x30, [sp, #576] // 8-byte Folded Reload + adcs x13, x13, x30 + ldr x30, [sp, #600] // 8-byte Folded Reload + adcs x14, x14, x30 + ldr x30, [sp, #248] // 8-byte Folded Reload + adds x8, x30, x8 + str x8, [x0, #16] + ldp x30, x8, [sp, #128] + adcs x8, x8, x9 + ldr x9, [sp, #192] // 8-byte Folded Reload + adcs x9, x9, x10 + ldr x10, [sp, #240] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #296] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #344] // 8-byte Folded Reload + adcs x12, x12, x13 + ldr x13, [sp, #392] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, xzr, xzr + adds x8, x8, x30 + ldr x30, [sp, #184] // 8-byte Folded Reload + adcs x9, x9, x30 + ldr x30, [sp, #232] // 8-byte Folded Reload + adcs x10, x10, x30 + ldr x30, [sp, #288] // 8-byte Folded Reload + adcs x11, x11, x30 + ldr x30, [sp, #336] // 8-byte Folded Reload + adcs x12, x12, x30 + ldr x30, [sp, #384] // 8-byte Folded Reload + adcs x13, x13, x30 + ldr x30, [sp, #440] // 8-byte Folded Reload + adcs x14, x14, x30 + adds x8, x28, x8 + str x8, [x0, #24] + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, x9 + ldr x9, [sp, #144] // 8-byte Folded Reload + adcs x9, x9, x10 + ldr x10, [sp, #224] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #312] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #376] // 8-byte Folded Reload + adcs x12, x12, x13 + ldr x13, [sp, #464] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, xzr, xzr + ldr x28, [sp, #176] // 8-byte Folded Reload + adds x8, x8, x28 + ldr x28, [sp, #264] // 8-byte Folded Reload + adcs x9, x9, x28 + ldr x28, [sp, #328] // 8-byte Folded Reload + adcs x10, x10, x28 + ldr x28, [sp, #416] // 8-byte Folded Reload + adcs x11, x11, x28 + ldr x28, [sp, #480] // 8-byte Folded Reload + adcs x12, x12, x28 + ldr x28, [sp, #536] // 8-byte Folded Reload + adcs x13, x13, x28 + ldr x28, [sp, #584] // 8-byte Folded Reload + adcs x14, x14, x28 + ldr x28, [sp, #48] // 8-byte Folded Reload + adds x8, x28, x8 + str x8, [x0, #32] + adcs x8, x24, x9 + ldr x9, [sp, #16] // 8-byte Folded Reload + adcs x9, x9, x10 + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #72] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #120] // 8-byte Folded Reload + adcs x12, x12, x13 + ldr x13, [sp, #168] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, xzr, xzr + adds x8, x8, x23 + adcs x9, x9, x29 + ldr x23, [sp, #32] // 8-byte Folded Reload + adcs x10, x10, x23 + ldr x23, [sp, #64] // 8-byte Folded Reload + adcs x11, x11, x23 + ldr x23, [sp, #112] // 8-byte Folded Reload + adcs x12, x12, x23 + ldr x23, [sp, #160] // 8-byte Folded Reload + adcs x13, x13, x23 + ldr x23, [sp, #216] // 8-byte Folded Reload + adcs x14, x14, x23 + adds x8, x20, x8 + str x8, [x0, #40] + adcs x8, x22, x9 + adcs 
x9, x25, x10 + ldr x10, [sp, #24] // 8-byte Folded Reload + adcs x10, x10, x11 + ldr x11, [sp, #88] // 8-byte Folded Reload + adcs x11, x11, x12 + ldr x12, [sp, #152] // 8-byte Folded Reload + adcs x12, x12, x13 + ldr x13, [sp, #256] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, xzr, xzr + adds x8, x8, x26 + ldr x20, [sp, #56] // 8-byte Folded Reload + adcs x9, x9, x20 + ldr x20, [sp, #104] // 8-byte Folded Reload + adcs x10, x10, x20 + ldr x20, [sp, #200] // 8-byte Folded Reload + adcs x11, x11, x20 + ldr x20, [sp, #280] // 8-byte Folded Reload + adcs x12, x12, x20 + ldr x20, [sp, #360] // 8-byte Folded Reload + adcs x13, x13, x20 + ldr x20, [sp, #432] // 8-byte Folded Reload + adcs x14, x14, x20 + adds x8, x27, x8 + str x8, [x0, #48] + adcs x8, x5, x9 + adcs x9, x3, x10 + adcs x10, x2, x11 + adcs x11, x17, x12 + adcs x12, x16, x13 + adcs x13, x15, x14 + adcs x14, xzr, xzr + adds x8, x8, x21 + str x8, [x0, #56] + adcs x8, x9, x19 + str x8, [x0, #64] + adcs x8, x10, x7 + str x8, [x0, #72] + adcs x8, x11, x6 + str x8, [x0, #80] + adcs x8, x12, x4 + str x8, [x0, #88] + adcs x8, x13, x1 + str x8, [x0, #96] + adcs x8, x14, x18 + str x8, [x0, #104] + add sp, sp, #624 // =624 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end96: + .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L + + .globl mcl_fpDbl_sqrPre7L + .align 2 + .type mcl_fpDbl_sqrPre7L,@function +mcl_fpDbl_sqrPre7L: // @mcl_fpDbl_sqrPre7L +// BB#0: + stp x24, x23, [sp, #-48]! + stp x22, x21, [sp, #16] + stp x20, x19, [sp, #32] + ldp x11, x8, [x1] + ldp x9, x10, [x1, #40] + ldp x15, x12, [x1, #16] + ldp x16, x3, [x1, #16] + ldp x13, x14, [x1, #32] + ldp x18, x17, [x1, #32] + ldr x2, [x1, #32] + mul x4, x11, x11 + umulh x5, x10, x11 + mul x6, x9, x11 + mul x7, x18, x11 + mul x19, x3, x11 + umulh x20, x16, x11 + mul x21, x16, x11 + umulh x22, x8, x11 + mul x23, x8, x11 + str x4, [x0] + umulh x4, x11, x11 + adds x4, x4, x23 + adcs x21, x22, x21 + adcs x19, x20, x19 + umulh x20, x3, x11 + adcs x7, x20, x7 + umulh x20, x18, x11 + adcs x6, x20, x6 + mul x20, x10, x11 + umulh x11, x9, x11 + adcs x20, x11, x20 + adcs x5, x5, xzr + adds x4, x23, x4 + ldp x11, x23, [x1, #40] + str x4, [x0, #8] + mul x4, x8, x8 + adcs x4, x4, x21 + mul x21, x16, x8 + adcs x19, x21, x19 + mul x21, x3, x8 + adcs x7, x21, x7 + mul x21, x18, x8 + adcs x6, x21, x6 + mul x21, x9, x8 + adcs x20, x21, x20 + mul x21, x10, x8 + umulh x10, x10, x8 + umulh x9, x9, x8 + umulh x18, x18, x8 + umulh x3, x3, x8 + umulh x16, x16, x8 + umulh x8, x8, x8 + adcs x5, x21, x5 + adcs x21, xzr, xzr + adds x4, x4, x22 + adcs x8, x19, x8 + ldp x19, x22, [x1] + adcs x16, x7, x16 + adcs x3, x6, x3 + ldp x6, x7, [x1, #8] + adcs x18, x20, x18 + mul x20, x19, x15 + adcs x9, x5, x9 + mul x5, x23, x15 + adcs x10, x21, x10 + mul x21, x14, x15 + adds x4, x20, x4 + mul x20, x13, x15 + str x4, [x0, #16] + mul x4, x6, x15 + adcs x8, x4, x8 + mul x4, x15, x15 + adcs x16, x4, x16 + mul x4, x12, x15 + adcs x3, x4, x3 + adcs x18, x20, x18 + umulh x20, x13, x15 + adcs x9, x21, x9 + umulh x21, x19, x15 + adcs x10, x5, x10 + adcs x5, xzr, xzr + adds x8, x8, x21 + umulh x21, x6, x15 + adcs x16, x16, x21 + umulh x21, x15, x15 + adcs x3, x3, x21 + umulh x21, x12, x15 + adcs x18, x18, x21 + adcs x9, x9, x20 + umulh x20, x14, x15 + adcs x10, x10, x20 + umulh x15, x23, x15 + adcs x15, x5, x15 + mul x5, x19, x12 + adds x8, x5, x8 + ldr x5, [x1, #32] + str x8, [x0, #24] + mul 
x8, x6, x12 + adcs x8, x8, x16 + ldr x16, [x1] + adcs x3, x4, x3 + mul x4, x12, x12 + adcs x18, x4, x18 + mul x4, x13, x12 + adcs x9, x4, x9 + mul x4, x14, x12 + adcs x10, x4, x10 + mul x4, x23, x12 + umulh x19, x19, x12 + adcs x15, x4, x15 + adcs x4, xzr, xzr + adds x8, x8, x19 + ldr x19, [x1, #24] + umulh x6, x6, x12 + adcs x3, x3, x6 + ldr x6, [x1, #48] + adcs x18, x18, x21 + ldr x20, [x1, #48] + umulh x21, x23, x12 + umulh x14, x14, x12 + umulh x13, x13, x12 + umulh x12, x12, x12 + adcs x9, x9, x12 + adcs x10, x10, x13 + ldp x12, x13, [x1] + adcs x14, x15, x14 + mul x15, x16, x5 + adcs x4, x4, x21 + mul x21, x6, x5 + adds x8, x15, x8 + mul x15, x17, x5 + str x8, [x0, #32] + mul x8, x22, x5 + adcs x8, x8, x3 + mul x3, x7, x5 + adcs x18, x3, x18 + mul x3, x19, x5 + adcs x9, x3, x9 + mul x3, x5, x5 + adcs x10, x3, x10 + umulh x3, x16, x5 + adcs x14, x15, x14 + adcs x4, x21, x4 + adcs x21, xzr, xzr + adds x8, x8, x3 + umulh x3, x22, x5 + adcs x18, x18, x3 + umulh x3, x7, x5 + adcs x9, x9, x3 + umulh x3, x19, x5 + adcs x10, x10, x3 + umulh x3, x5, x5 + adcs x14, x14, x3 + umulh x3, x6, x5 + umulh x5, x17, x5 + adcs x4, x4, x5 + adcs x3, x21, x3 + mul x21, x16, x17 + adds x8, x21, x8 + ldp x21, x1, [x1, #16] + str x8, [x0, #40] + mul x8, x22, x17 + adcs x8, x8, x18 + mul x18, x7, x17 + adcs x9, x18, x9 + mul x18, x19, x17 + adcs x10, x18, x10 + mul x18, x6, x17 + adcs x14, x15, x14 + mul x15, x17, x17 + umulh x6, x6, x17 + umulh x19, x19, x17 + umulh x7, x7, x17 + umulh x22, x22, x17 + umulh x16, x16, x17 + umulh x17, x17, x17 + adcs x15, x15, x4 + mul x4, x12, x20 + adcs x18, x18, x3 + adcs x3, xzr, xzr + adds x8, x8, x16 + mul x16, x11, x20 + adcs x9, x9, x22 + mul x22, x2, x20 + adcs x10, x10, x7 + mul x7, x1, x20 + adcs x14, x14, x19 + mul x19, x21, x20 + adcs x15, x15, x5 + mul x5, x13, x20 + adcs x17, x18, x17 + mul x18, x20, x20 + umulh x12, x12, x20 + umulh x13, x13, x20 + umulh x21, x21, x20 + umulh x1, x1, x20 + umulh x2, x2, x20 + umulh x11, x11, x20 + umulh x20, x20, x20 + adcs x3, x3, x6 + adds x8, x4, x8 + str x8, [x0, #48] + adcs x8, x5, x9 + adcs x9, x19, x10 + adcs x10, x7, x14 + adcs x14, x22, x15 + adcs x15, x16, x17 + adcs x16, x18, x3 + adcs x17, xzr, xzr + adds x8, x8, x12 + str x8, [x0, #56] + adcs x8, x9, x13 + str x8, [x0, #64] + adcs x8, x10, x21 + str x8, [x0, #72] + adcs x8, x14, x1 + str x8, [x0, #80] + adcs x8, x15, x2 + str x8, [x0, #88] + adcs x8, x16, x11 + str x8, [x0, #96] + adcs x8, x17, x20 + str x8, [x0, #104] + ldp x20, x19, [sp, #32] + ldp x22, x21, [sp, #16] + ldp x24, x23, [sp], #48 + ret +.Lfunc_end97: + .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L + + .globl mcl_fp_mont7L + .align 2 + .type mcl_fp_mont7L,@function +mcl_fp_mont7L: // @mcl_fp_mont7L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #144 // =144 + str x2, [sp, #112] // 8-byte Folded Spill + str x0, [sp, #64] // 8-byte Folded Spill + ldr x6, [x2] + ldr x15, [x1, #48] + str x15, [sp, #96] // 8-byte Folded Spill + ldr x0, [x1, #32] + str x0, [sp, #56] // 8-byte Folded Spill + ldr x18, [x1, #40] + ldp x11, x13, [x1, #16] + ldp x17, x5, [x1] + str x5, [sp, #88] // 8-byte Folded Spill + ldur x12, [x3, #-8] + str x12, [sp, #128] // 8-byte Folded Spill + ldr x1, [x3, #32] + str x1, [sp, #104] // 8-byte Folded Spill + ldr x9, [x3, #40] + str x9, [sp, #80] // 8-byte Folded Spill + ldr x8, [x3, #16] + str x8, [sp, #136] // 8-byte Folded Spill + ldr x10, [x3, #24] + str x10, [sp, #120] // 8-byte Folded Spill + ldr x14, [x3] + str x14, [sp, #24] // 8-byte Folded Spill + ldr x4, [x3, #8] + str x4, [sp, #72] // 8-byte Folded Spill + ldr x7, [x2, #8] + umulh x19, x15, x6 + mul x20, x15, x6 + umulh x21, x18, x6 + mul x22, x18, x6 + mov x15, x0 + umulh x23, x15, x6 + mul x24, x15, x6 + mov x16, x13 + umulh x25, x16, x6 + mul x26, x16, x6 + mov x13, x11 + umulh x27, x13, x6 + mul x28, x13, x6 + mul x29, x5, x6 + mov x11, x17 + umulh x30, x11, x6 + adds x29, x30, x29 + umulh x30, x5, x6 + mul x6, x11, x6 + adcs x28, x30, x28 + mul x30, x6, x12 + adcs x26, x27, x26 + mul x27, x30, x10 + adcs x24, x25, x24 + mul x25, x30, x8 + adcs x22, x23, x22 + mul x23, x30, x4 + adcs x20, x21, x20 + umulh x21, x30, x14 + adcs x19, x19, xzr + adds x21, x21, x23 + umulh x23, x30, x4 + adcs x23, x23, x25 + umulh x25, x30, x8 + adcs x25, x25, x27 + mul x27, x30, x1 + umulh x17, x30, x10 + adcs x17, x17, x27 + ldr x3, [x3, #48] + str x3, [sp, #48] // 8-byte Folded Spill + mul x27, x30, x9 + umulh x0, x30, x1 + adcs x0, x0, x27 + mul x27, x30, x3 + umulh x2, x30, x9 + adcs x2, x2, x27 + umulh x27, x30, x3 + mul x30, x30, x14 + adcs x27, x27, xzr + cmn x30, x6 + adcs x6, x21, x29 + adcs x21, x23, x28 + mul x23, x7, x15 + adcs x25, x25, x26 + mul x26, x7, x16 + adcs x17, x17, x24 + mul x24, x7, x13 + adcs x0, x0, x22 + mul x22, x7, x5 + adcs x2, x2, x20 + umulh x20, x7, x11 + adcs x19, x27, x19 + adcs x27, xzr, xzr + adds x20, x20, x22 + umulh x22, x7, x5 + adcs x22, x22, x24 + umulh x24, x7, x13 + mov x5, x13 + adcs x24, x24, x26 + umulh x26, x7, x16 + adcs x23, x26, x23 + mul x26, x7, x18 + umulh x28, x7, x15 + adcs x26, x28, x26 + ldr x15, [sp, #96] // 8-byte Folded Reload + mul x28, x7, x15 + umulh x29, x7, x18 + adcs x28, x29, x28 + umulh x29, x7, x15 + mul x7, x7, x11 + adcs x29, x29, xzr + adds x30, x6, x7 + adcs x6, x21, x20 + adcs x25, x25, x22 + mul x22, x30, x12 + adcs x24, x17, x24 + mul x17, x22, x10 + adcs x0, x0, x23 + mul x23, x22, x8 + adcs x7, x2, x26 + mul x2, x22, x4 + adcs x20, x19, x28 + umulh x26, x22, x14 + adcs x21, x27, x29 + adcs x19, xzr, xzr + adds x2, x26, x2 + umulh x26, x22, x4 + adcs x23, x26, x23 + umulh x26, x22, x8 + adcs x17, x26, x17 + mul x26, x22, x1 + umulh x27, x22, x10 + adcs x26, x27, x26 + mul x27, x22, x9 + umulh x28, x22, x1 + adcs x27, x28, x27 + mul x28, x22, x3 + umulh x29, x22, x9 + adcs x28, x29, x28 + umulh x29, x22, x3 + mul x22, x22, x14 + mov x10, x14 + adcs x29, x29, xzr + cmn x22, x30 + adcs x22, x2, x6 + adcs x23, x23, x25 + ldr x8, [sp, #112] // 8-byte Folded Reload + adcs x24, x17, x24 + ldp x25, x17, [x8, #16] + adcs x0, x26, x0 + mul x2, x25, x16 + adcs x6, x27, x7 + mul x7, x25, x5 + adcs x20, x28, x20 + ldp x15, x8, [sp, #88] + mul x26, x25, x15 + adcs 
x21, x29, x21 + mov x12, x11 + umulh x27, x25, x12 + adcs x19, x19, xzr + adds x26, x27, x26 + umulh x27, x25, x15 + adcs x7, x27, x7 + umulh x27, x25, x5 + mov x9, x5 + adcs x2, x27, x2 + ldr x11, [sp, #56] // 8-byte Folded Reload + mul x27, x25, x11 + umulh x28, x25, x16 + mov x13, x16 + adcs x27, x28, x27 + mul x28, x25, x18 + umulh x29, x25, x11 + adcs x28, x29, x28 + mul x29, x25, x8 + umulh x30, x25, x18 + adcs x29, x30, x29 + umulh x30, x25, x8 + mov x14, x8 + mul x25, x25, x12 + mov x5, x12 + adcs x30, x30, xzr + adds x22, x22, x25 + adcs x23, x23, x26 + adcs x7, x24, x7 + adcs x0, x0, x2 + ldp x8, x12, [sp, #128] + mul x2, x22, x8 + adcs x6, x6, x27 + mul x24, x2, x12 + adcs x20, x20, x28 + mul x25, x2, x4 + adcs x21, x21, x29 + mov x1, x10 + umulh x26, x2, x1 + adcs x19, x19, x30 + adcs x27, xzr, xzr + adds x25, x26, x25 + umulh x26, x2, x4 + adcs x24, x26, x24 + ldr x10, [sp, #120] // 8-byte Folded Reload + mul x26, x2, x10 + umulh x28, x2, x12 + adcs x26, x28, x26 + ldr x12, [sp, #104] // 8-byte Folded Reload + mul x28, x2, x12 + umulh x29, x2, x10 + adcs x28, x29, x28 + ldr x10, [sp, #80] // 8-byte Folded Reload + mul x29, x2, x10 + umulh x30, x2, x12 + adcs x29, x30, x29 + mul x30, x2, x3 + umulh x12, x2, x10 + adcs x12, x12, x30 + umulh x30, x2, x3 + mul x2, x2, x1 + adcs x30, x30, xzr + cmn x2, x22 + adcs x2, x25, x23 + adcs x7, x24, x7 + adcs x0, x26, x0 + mul x22, x17, x11 + adcs x6, x28, x6 + mul x23, x17, x13 + adcs x20, x29, x20 + mul x24, x17, x9 + adcs x12, x12, x21 + mul x21, x17, x15 + adcs x19, x30, x19 + umulh x25, x17, x5 + adcs x26, x27, xzr + adds x21, x25, x21 + umulh x25, x17, x15 + adcs x24, x25, x24 + umulh x25, x17, x9 + mov x16, x9 + adcs x23, x25, x23 + umulh x25, x17, x13 + adcs x22, x25, x22 + mul x25, x17, x18 + umulh x27, x17, x11 + adcs x25, x27, x25 + mov x9, x14 + mul x27, x17, x9 + umulh x28, x17, x18 + adcs x27, x28, x27 + umulh x28, x17, x9 + mul x17, x17, x5 + mov x15, x5 + adcs x28, x28, xzr + adds x17, x2, x17 + adcs x2, x7, x21 + adcs x0, x0, x24 + mul x24, x17, x8 + adcs x29, x6, x23 + ldr x9, [sp, #120] // 8-byte Folded Reload + mul x23, x24, x9 + adcs x6, x20, x22 + ldr x8, [sp, #136] // 8-byte Folded Reload + mul x22, x24, x8 + adcs x7, x12, x25 + mul x12, x24, x4 + adcs x20, x19, x27 + umulh x25, x24, x1 + adcs x21, x26, x28 + adcs x19, xzr, xzr + adds x12, x25, x12 + umulh x25, x24, x4 + adcs x25, x25, x22 + umulh x22, x24, x8 + adcs x26, x22, x23 + ldr x5, [sp, #104] // 8-byte Folded Reload + mul x22, x24, x5 + umulh x23, x24, x9 + adcs x27, x23, x22 + mov x9, x10 + mul x22, x24, x9 + umulh x23, x24, x5 + adcs x28, x23, x22 + mul x22, x24, x3 + umulh x23, x24, x9 + adcs x30, x23, x22 + umulh x22, x24, x3 + mul x23, x24, x1 + mov x3, x1 + adcs x24, x22, xzr + cmn x23, x17 + adcs x22, x12, x2 + adcs x23, x25, x0 + ldr x10, [sp, #112] // 8-byte Folded Reload + ldp x12, x0, [x10, #32] + adcs x17, x26, x29 + adcs x2, x27, x6 + mul x6, x12, x13 + adcs x7, x28, x7 + mov x10, x16 + mul x25, x12, x10 + adcs x20, x30, x20 + ldr x16, [sp, #88] // 8-byte Folded Reload + mul x26, x12, x16 + adcs x21, x24, x21 + umulh x24, x12, x15 + adcs x1, x19, xzr + adds x24, x24, x26 + umulh x26, x12, x16 + adcs x25, x26, x25 + umulh x26, x12, x10 + adcs x6, x26, x6 + mul x26, x12, x11 + umulh x27, x12, x13 + adcs x26, x27, x26 + mul x27, x12, x18 + umulh x28, x12, x11 + adcs x27, x28, x27 + mul x28, x12, x14 + umulh x29, x12, x18 + adcs x28, x29, x28 + umulh x29, x12, x14 + mul x12, x12, x15 + adcs x29, x29, xzr + adds x12, x22, x12 + adcs x22, x23, x24 + 
adcs x17, x17, x25 + adcs x2, x2, x6 + ldr x19, [sp, #128] // 8-byte Folded Reload + mul x6, x12, x19 + adcs x7, x7, x26 + mov x30, x8 + mul x23, x6, x30 + adcs x20, x20, x27 + mul x24, x6, x4 + adcs x21, x21, x28 + mov x8, x3 + umulh x25, x6, x8 + adcs x1, x1, x29 + adcs x26, xzr, xzr + adds x24, x25, x24 + umulh x25, x6, x4 + adcs x23, x25, x23 + ldr x4, [sp, #120] // 8-byte Folded Reload + mul x25, x6, x4 + umulh x27, x6, x30 + adcs x25, x27, x25 + mul x27, x6, x5 + umulh x28, x6, x4 + adcs x27, x28, x27 + mov x3, x9 + mul x28, x6, x3 + umulh x29, x6, x5 + adcs x28, x29, x28 + ldr x9, [sp, #48] // 8-byte Folded Reload + mul x29, x6, x9 + umulh x30, x6, x3 + adcs x29, x30, x29 + umulh x30, x6, x9 + mov x3, x9 + mul x6, x6, x8 + mov x5, x8 + adcs x30, x30, xzr + cmn x6, x12 + adcs x12, x24, x22 + adcs x17, x23, x17 + adcs x2, x25, x2 + mul x6, x0, x11 + adcs x7, x27, x7 + mul x22, x0, x13 + adcs x20, x28, x20 + mul x23, x0, x10 + adcs x21, x29, x21 + mul x24, x0, x16 + adcs x29, x30, x1 + mov x1, x15 + umulh x25, x0, x1 + adcs x26, x26, xzr + adds x24, x25, x24 + umulh x25, x0, x16 + adcs x23, x25, x23 + umulh x25, x0, x10 + adcs x22, x25, x22 + umulh x25, x0, x13 + adcs x6, x25, x6 + mul x25, x0, x18 + umulh x27, x0, x11 + adcs x25, x27, x25 + mov x9, x14 + mul x27, x0, x9 + umulh x28, x0, x18 + adcs x27, x28, x27 + umulh x28, x0, x9 + mul x0, x0, x1 + adcs x28, x28, xzr + adds x12, x12, x0 + adcs x8, x17, x24 + str x8, [sp, #40] // 8-byte Folded Spill + adcs x8, x2, x23 + str x8, [sp, #32] // 8-byte Folded Spill + mul x2, x12, x19 + adcs x7, x7, x22 + mul x22, x2, x4 + adcs x8, x20, x6 + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #136] // 8-byte Folded Reload + mul x20, x2, x8 + adcs x21, x21, x25 + ldr x9, [sp, #72] // 8-byte Folded Reload + mul x23, x2, x9 + adcs x19, x29, x27 + mov x15, x5 + umulh x24, x2, x15 + adcs x17, x26, x28 + str x17, [sp, #8] // 8-byte Folded Spill + adcs x26, xzr, xzr + adds x23, x24, x23 + umulh x24, x2, x9 + adcs x20, x24, x20 + umulh x24, x2, x8 + adcs x22, x24, x22 + ldp x25, x8, [sp, #104] + mul x24, x2, x25 + umulh x27, x2, x4 + adcs x6, x27, x24 + ldr x5, [sp, #80] // 8-byte Folded Reload + mul x27, x2, x5 + umulh x28, x2, x25 + adcs x27, x28, x27 + mul x28, x2, x3 + umulh x29, x2, x5 + adcs x28, x29, x28 + ldr x29, [x8, #48] + mul x30, x2, x15 + umulh x2, x2, x3 + adcs x2, x2, xzr + cmn x30, x12 + umulh x24, x29, x14 + mul x30, x29, x14 + umulh x0, x29, x18 + mul x18, x29, x18 + umulh x17, x29, x11 + mul x15, x29, x11 + umulh x14, x29, x13 + mul x13, x29, x13 + umulh x12, x29, x10 + mul x11, x29, x10 + mul x10, x29, x16 + umulh x9, x29, x16 + umulh x8, x29, x1 + mul x29, x29, x1 + ldr x16, [sp, #40] // 8-byte Folded Reload + adcs x23, x23, x16 + ldr x16, [sp, #32] // 8-byte Folded Reload + adcs x20, x20, x16 + adcs x7, x22, x7 + ldr x16, [sp, #16] // 8-byte Folded Reload + adcs x6, x6, x16 + adcs x21, x27, x21 + adcs x19, x28, x19 + ldr x16, [sp, #8] // 8-byte Folded Reload + adcs x2, x2, x16 + adcs x22, x26, xzr + adds x8, x8, x10 + adcs x9, x9, x11 + adcs x10, x12, x13 + adcs x11, x14, x15 + adcs x12, x17, x18 + adcs x13, x0, x30 + adcs x14, x24, xzr + adds x15, x23, x29 + adcs x8, x20, x8 + ldr x16, [sp, #128] // 8-byte Folded Reload + mul x16, x15, x16 + adcs x9, x7, x9 + mul x17, x16, x3 + mul x18, x16, x5 + mul x0, x16, x25 + adcs x10, x6, x10 + mul x6, x16, x4 + adcs x11, x21, x11 + ldr x21, [sp, #136] // 8-byte Folded Reload + mul x7, x16, x21 + adcs x12, x19, x12 + ldr x23, [sp, #72] // 8-byte Folded Reload + mul x19, x16, x23 + 
adcs x13, x2, x13 + ldr x24, [sp, #24] // 8-byte Folded Reload + umulh x2, x16, x24 + adcs x14, x22, x14 + adcs x20, xzr, xzr + adds x2, x2, x19 + umulh x19, x16, x23 + adcs x7, x19, x7 + umulh x19, x16, x21 + adcs x6, x19, x6 + umulh x19, x16, x4 + adcs x0, x19, x0 + umulh x19, x16, x25 + adcs x18, x19, x18 + umulh x19, x16, x5 + adcs x17, x19, x17 + umulh x19, x16, x3 + mul x16, x16, x24 + adcs x19, x19, xzr + cmn x16, x15 + adcs x8, x2, x8 + adcs x9, x7, x9 + adcs x10, x6, x10 + adcs x11, x0, x11 + adcs x12, x18, x12 + adcs x13, x17, x13 + adcs x14, x19, x14 + adcs x15, x20, xzr + subs x16, x8, x24 + sbcs x17, x9, x23 + sbcs x18, x10, x21 + sbcs x0, x11, x4 + sbcs x1, x12, x25 + sbcs x2, x13, x5 + sbcs x3, x14, x3 + sbcs x15, x15, xzr + tst x15, #0x1 + csel x8, x8, x16, ne + csel x9, x9, x17, ne + csel x10, x10, x18, ne + csel x11, x11, x0, ne + csel x12, x12, x1, ne + csel x13, x13, x2, ne + csel x14, x14, x3, ne + ldr x15, [sp, #64] // 8-byte Folded Reload + stp x8, x9, [x15] + stp x10, x11, [x15, #16] + stp x12, x13, [x15, #32] + str x14, [x15, #48] + add sp, sp, #144 // =144 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end98: + .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L + + .globl mcl_fp_montNF7L + .align 2 + .type mcl_fp_montNF7L,@function +mcl_fp_montNF7L: // @mcl_fp_montNF7L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + sub sp, sp, #32 // =32 + stp x0, x2, [sp, #8] + ldr x7, [x2] + ldp x5, x16, [x1, #40] + ldp x6, x17, [x1, #24] + ldr x4, [x1] + ldp x1, x18, [x1, #8] + ldur x8, [x3, #-8] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x15, x0, [x3, #40] + ldp x11, x10, [x3, #24] + ldp x13, x12, [x3, #8] + ldr x14, [x3] + ldr x25, [x2, #8] + umulh x3, x16, x7 + mul x19, x16, x7 + umulh x20, x5, x7 + mul x21, x5, x7 + umulh x22, x17, x7 + mul x23, x17, x7 + umulh x24, x6, x7 + mul x26, x6, x7 + umulh x27, x18, x7 + mul x28, x18, x7 + mul x29, x1, x7 + umulh x30, x4, x7 + adds x29, x30, x29 + umulh x30, x1, x7 + mul x7, x4, x7 + adcs x28, x30, x28 + mul x30, x25, x5 + adcs x26, x27, x26 + mul x27, x25, x17 + adcs x23, x24, x23 + mul x24, x25, x6 + adcs x21, x22, x21 + mul x22, x7, x8 + adcs x19, x20, x19 + mul x20, x22, x14 + adcs x3, x3, xzr + cmn x20, x7 + mul x9, x25, x18 + mul x7, x22, x13 + adcs x7, x7, x29 + mul x20, x22, x12 + adcs x20, x20, x28 + mul x28, x22, x11 + adcs x26, x28, x26 + mul x28, x22, x10 + adcs x23, x28, x23 + mul x28, x22, x15 + adcs x21, x28, x21 + mul x28, x22, x0 + adcs x19, x28, x19 + umulh x28, x22, x14 + adcs x29, x3, xzr + adds x28, x7, x28 + umulh x3, x22, x13 + adcs x8, x20, x3 + umulh x3, x22, x12 + adcs x26, x26, x3 + umulh x3, x22, x11 + adcs x3, x23, x3 + umulh x7, x22, x10 + adcs x7, x21, x7 + umulh x20, x22, x15 + adcs x19, x19, x20 + mul x21, x25, x1 + umulh x20, x22, x0 + adcs x20, x29, x20 + umulh x22, x25, x4 + adds x29, x22, x21 + umulh x21, x25, x1 + adcs x23, x21, x9 + umulh x9, x25, x18 + adcs x21, x9, x24 + umulh x9, x25, x6 + adcs x22, x9, x27 + umulh x9, x25, x17 + adcs x30, x9, x30 + mul x9, x25, x16 + umulh x24, x25, x5 + adcs x24, x24, x9 + umulh x9, x25, x16 + mul x25, x25, x4 + adcs x9, x9, xzr + adds x27, x25, x28 + adcs x25, x29, x8 + ldp x28, x8, [x2, #16] + adcs x29, x23, x26 + adcs x3, x21, x3 + mul x21, x28, x17 + adcs x7, x22, x7 + mul x22, x28, x6 + adcs x19, x30, x19 
+ ldr x2, [sp, #24] // 8-byte Folded Reload + mul x23, x27, x2 + adcs x20, x24, x20 + mul x24, x23, x14 + adcs x9, x9, xzr + cmn x24, x27 + mul x24, x28, x18 + mul x26, x23, x13 + adcs x25, x26, x25 + mul x26, x23, x12 + adcs x26, x26, x29 + mul x27, x23, x11 + adcs x3, x27, x3 + mul x27, x23, x10 + adcs x7, x27, x7 + mul x27, x23, x15 + adcs x19, x27, x19 + mul x27, x23, x0 + adcs x20, x27, x20 + umulh x27, x23, x14 + adcs x9, x9, xzr + adds x25, x25, x27 + umulh x27, x23, x13 + adcs x26, x26, x27 + umulh x27, x23, x12 + adcs x3, x3, x27 + umulh x27, x23, x11 + adcs x7, x7, x27 + umulh x27, x23, x10 + adcs x19, x19, x27 + umulh x27, x23, x15 + adcs x20, x20, x27 + mul x27, x28, x1 + umulh x23, x23, x0 + adcs x9, x9, x23 + umulh x23, x28, x4 + adds x23, x23, x27 + umulh x27, x28, x1 + adcs x24, x27, x24 + umulh x27, x28, x18 + adcs x22, x27, x22 + umulh x27, x28, x6 + adcs x21, x27, x21 + mul x27, x28, x5 + umulh x29, x28, x17 + adcs x27, x29, x27 + mul x29, x28, x16 + umulh x30, x28, x5 + adcs x29, x30, x29 + umulh x30, x28, x16 + mul x28, x28, x4 + adcs x30, x30, xzr + adds x25, x28, x25 + adcs x23, x23, x26 + adcs x3, x24, x3 + mul x26, x8, x5 + adcs x7, x22, x7 + mul x22, x8, x17 + adcs x19, x21, x19 + mul x24, x8, x6 + adcs x20, x27, x20 + mul x21, x25, x2 + adcs x9, x29, x9 + mul x27, x21, x14 + adcs x28, x30, xzr + cmn x27, x25 + mul x25, x8, x18 + mul x27, x21, x13 + adcs x23, x27, x23 + mul x27, x21, x12 + adcs x3, x27, x3 + mul x27, x21, x11 + adcs x7, x27, x7 + mul x27, x21, x10 + adcs x19, x27, x19 + mul x27, x21, x15 + adcs x20, x27, x20 + mul x27, x21, x0 + adcs x9, x27, x9 + umulh x27, x21, x14 + adcs x28, x28, xzr + adds x27, x23, x27 + umulh x23, x21, x13 + adcs x3, x3, x23 + umulh x23, x21, x12 + adcs x30, x7, x23 + umulh x7, x21, x11 + adcs x7, x19, x7 + umulh x19, x21, x10 + adcs x19, x20, x19 + umulh x20, x21, x15 + adcs x20, x9, x20 + mul x9, x8, x1 + umulh x21, x21, x0 + adcs x21, x28, x21 + umulh x23, x8, x4 + adds x9, x23, x9 + umulh x23, x8, x1 + adcs x28, x23, x25 + umulh x23, x8, x18 + adcs x23, x23, x24 + umulh x24, x8, x6 + adcs x24, x24, x22 + umulh x22, x8, x17 + adcs x25, x22, x26 + mul x22, x8, x16 + umulh x26, x8, x5 + adcs x26, x26, x22 + umulh x22, x8, x16 + mul x29, x8, x4 + adcs x2, x22, xzr + adds x29, x29, x27 + adcs x27, x9, x3 + ldr x8, [sp, #16] // 8-byte Folded Reload + ldp x22, x3, [x8, #32] + adcs x9, x28, x30 + adcs x7, x23, x7 + mul x23, x22, x17 + adcs x19, x24, x19 + mul x24, x22, x6 + adcs x20, x25, x20 + ldr x8, [sp, #24] // 8-byte Folded Reload + mul x25, x29, x8 + adcs x21, x26, x21 + mul x26, x25, x14 + adcs x2, x2, xzr + cmn x26, x29 + mul x26, x22, x18 + mul x28, x25, x13 + adcs x27, x28, x27 + mul x28, x25, x12 + adcs x9, x28, x9 + mul x28, x25, x11 + adcs x7, x28, x7 + mul x28, x25, x10 + adcs x19, x28, x19 + mul x28, x25, x15 + adcs x20, x28, x20 + mul x28, x25, x0 + adcs x21, x28, x21 + umulh x28, x25, x14 + adcs x2, x2, xzr + adds x27, x27, x28 + umulh x28, x25, x13 + adcs x9, x9, x28 + umulh x28, x25, x12 + adcs x7, x7, x28 + umulh x28, x25, x11 + adcs x19, x19, x28 + umulh x28, x25, x10 + adcs x20, x20, x28 + umulh x28, x25, x15 + adcs x21, x21, x28 + mul x28, x22, x1 + umulh x25, x25, x0 + adcs x2, x2, x25 + umulh x25, x22, x4 + adds x25, x25, x28 + umulh x28, x22, x1 + adcs x26, x28, x26 + umulh x28, x22, x18 + adcs x24, x28, x24 + umulh x28, x22, x6 + adcs x23, x28, x23 + mul x28, x22, x5 + umulh x29, x22, x17 + adcs x28, x29, x28 + mul x29, x22, x16 + umulh x30, x22, x5 + adcs x29, x30, x29 + umulh x30, x22, x16 + mul 
x22, x22, x4 + adcs x30, x30, xzr + adds x22, x22, x27 + adcs x9, x25, x9 + adcs x7, x26, x7 + mul x25, x3, x5 + adcs x19, x24, x19 + mul x24, x3, x17 + adcs x20, x23, x20 + mul x23, x3, x6 + adcs x21, x28, x21 + mul x26, x22, x8 + adcs x8, x29, x2 + mul x27, x26, x14 + adcs x28, x30, xzr + cmn x27, x22 + mul x22, x3, x18 + mul x27, x26, x13 + adcs x9, x27, x9 + mul x27, x26, x12 + adcs x7, x27, x7 + mul x27, x26, x11 + adcs x19, x27, x19 + mul x27, x26, x10 + adcs x20, x27, x20 + mul x27, x26, x15 + adcs x21, x27, x21 + mul x27, x26, x0 + adcs x8, x27, x8 + umulh x27, x26, x14 + adcs x28, x28, xzr + adds x9, x9, x27 + umulh x27, x26, x13 + adcs x7, x7, x27 + umulh x27, x26, x12 + adcs x19, x19, x27 + umulh x27, x26, x11 + adcs x20, x20, x27 + umulh x27, x26, x10 + adcs x21, x21, x27 + umulh x27, x26, x15 + adcs x8, x8, x27 + mul x27, x3, x1 + umulh x26, x26, x0 + adcs x26, x28, x26 + umulh x28, x3, x4 + adds x27, x28, x27 + umulh x28, x3, x1 + adcs x22, x28, x22 + umulh x28, x3, x18 + adcs x23, x28, x23 + umulh x28, x3, x6 + adcs x24, x28, x24 + umulh x28, x3, x17 + adcs x25, x28, x25 + mul x28, x3, x16 + umulh x29, x3, x5 + adcs x28, x29, x28 + ldp x2, x30, [sp, #16] + ldr x2, [x2, #48] + umulh x29, x3, x16 + mul x3, x3, x4 + adcs x29, x29, xzr + adds x9, x3, x9 + adcs x3, x27, x7 + umulh x7, x2, x16 + mul x16, x2, x16 + adcs x19, x22, x19 + umulh x22, x2, x5 + mul x5, x2, x5 + adcs x20, x23, x20 + umulh x23, x2, x17 + mul x17, x2, x17 + adcs x21, x24, x21 + umulh x24, x2, x6 + mul x6, x2, x6 + adcs x8, x25, x8 + mul x25, x9, x30 + adcs x26, x28, x26 + mul x27, x25, x14 + adcs x28, x29, xzr + cmn x27, x9 + umulh x9, x2, x18 + mul x18, x2, x18 + umulh x27, x2, x1 + mul x1, x2, x1 + umulh x29, x2, x4 + mul x2, x2, x4 + mul x4, x25, x13 + adcs x3, x4, x3 + mul x4, x25, x12 + adcs x4, x4, x19 + mul x19, x25, x11 + adcs x19, x19, x20 + mul x20, x25, x10 + adcs x20, x20, x21 + mul x21, x25, x15 + adcs x8, x21, x8 + mul x21, x25, x0 + adcs x21, x21, x26 + adcs x26, x28, xzr + umulh x28, x25, x14 + adds x3, x3, x28 + umulh x28, x25, x13 + adcs x4, x4, x28 + umulh x28, x25, x12 + adcs x19, x19, x28 + umulh x28, x25, x11 + adcs x20, x20, x28 + umulh x28, x25, x10 + adcs x8, x8, x28 + umulh x28, x25, x15 + adcs x21, x21, x28 + umulh x25, x25, x0 + adcs x25, x26, x25 + adds x1, x29, x1 + adcs x18, x27, x18 + adcs x9, x9, x6 + adcs x17, x24, x17 + adcs x5, x23, x5 + adcs x16, x22, x16 + adcs x6, x7, xzr + adds x2, x2, x3 + adcs x1, x1, x4 + adcs x18, x18, x19 + adcs x9, x9, x20 + adcs x8, x17, x8 + adcs x17, x5, x21 + mul x3, x2, x30 + adcs x16, x16, x25 + mul x4, x3, x14 + adcs x5, x6, xzr + cmn x4, x2 + mul x2, x3, x13 + adcs x1, x2, x1 + mul x2, x3, x12 + adcs x18, x2, x18 + mul x2, x3, x11 + adcs x9, x2, x9 + mul x2, x3, x10 + adcs x8, x2, x8 + mul x2, x3, x15 + adcs x17, x2, x17 + mul x2, x3, x0 + adcs x16, x2, x16 + umulh x2, x3, x14 + adcs x4, x5, xzr + adds x1, x1, x2 + umulh x2, x3, x13 + adcs x18, x18, x2 + umulh x2, x3, x12 + adcs x9, x9, x2 + umulh x2, x3, x11 + adcs x8, x8, x2 + umulh x2, x3, x10 + adcs x17, x17, x2 + umulh x2, x3, x15 + adcs x16, x16, x2 + umulh x2, x3, x0 + adcs x2, x4, x2 + subs x14, x1, x14 + sbcs x13, x18, x13 + sbcs x12, x9, x12 + sbcs x11, x8, x11 + sbcs x10, x17, x10 + sbcs x15, x16, x15 + sbcs x0, x2, x0 + asr x3, x0, #63 + cmp x3, #0 // =0 + csel x14, x1, x14, lt + csel x13, x18, x13, lt + csel x9, x9, x12, lt + csel x8, x8, x11, lt + csel x10, x17, x10, lt + csel x11, x16, x15, lt + csel x12, x2, x0, lt + ldr x15, [sp, #8] // 8-byte Folded Reload + stp x14, 
x13, [x15] + stp x9, x8, [x15, #16] + stp x10, x11, [x15, #32] + str x12, [x15, #48] + add sp, sp, #32 // =32 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end99: + .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L + + .globl mcl_fp_montRed7L + .align 2 + .type mcl_fp_montRed7L,@function +mcl_fp_montRed7L: // @mcl_fp_montRed7L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + ldur x15, [x2, #-8] + ldp x9, x8, [x2, #40] + ldp x11, x10, [x2, #24] + ldp x13, x12, [x2, #8] + ldr x14, [x2] + ldp x17, x18, [x1, #96] + ldp x2, x3, [x1, #80] + ldp x4, x5, [x1, #64] + ldp x6, x7, [x1, #48] + ldp x19, x20, [x1, #32] + ldp x21, x22, [x1, #16] + ldp x16, x1, [x1] + mul x23, x16, x15 + mul x24, x23, x8 + mul x25, x23, x9 + mul x26, x23, x10 + mul x27, x23, x11 + mul x28, x23, x12 + mul x29, x23, x13 + umulh x30, x23, x14 + adds x29, x30, x29 + umulh x30, x23, x13 + adcs x28, x30, x28 + umulh x30, x23, x12 + adcs x27, x30, x27 + umulh x30, x23, x11 + adcs x26, x30, x26 + umulh x30, x23, x10 + adcs x25, x30, x25 + umulh x30, x23, x9 + adcs x24, x30, x24 + umulh x30, x23, x8 + mul x23, x23, x14 + adcs x30, x30, xzr + cmn x16, x23 + adcs x16, x1, x29 + adcs x1, x21, x28 + mul x21, x16, x15 + adcs x22, x22, x27 + mul x23, x21, x8 + mul x27, x21, x9 + mul x28, x21, x10 + mul x29, x21, x11 + adcs x19, x19, x26 + mul x26, x21, x12 + adcs x20, x20, x25 + mul x25, x21, x13 + adcs x6, x6, x24 + umulh x24, x21, x14 + adcs x7, x7, x30 + adcs x4, x4, xzr + adcs x5, x5, xzr + adcs x2, x2, xzr + adcs x3, x3, xzr + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x30, xzr, xzr + adds x24, x24, x25 + umulh x25, x21, x13 + adcs x25, x25, x26 + umulh x26, x21, x12 + adcs x26, x26, x29 + umulh x29, x21, x11 + adcs x28, x29, x28 + umulh x29, x21, x10 + adcs x27, x29, x27 + umulh x29, x21, x9 + adcs x23, x29, x23 + umulh x29, x21, x8 + mul x21, x21, x14 + adcs x29, x29, xzr + cmn x21, x16 + adcs x16, x24, x1 + adcs x1, x25, x22 + mul x21, x16, x15 + adcs x19, x26, x19 + mul x22, x21, x8 + mul x24, x21, x9 + mul x25, x21, x10 + adcs x20, x28, x20 + mul x26, x21, x11 + adcs x6, x27, x6 + mul x27, x21, x12 + adcs x7, x23, x7 + mul x23, x21, x13 + adcs x4, x29, x4 + umulh x28, x21, x14 + adcs x5, x5, xzr + adcs x2, x2, xzr + adcs x3, x3, xzr + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x29, x30, xzr + adds x23, x28, x23 + umulh x28, x21, x13 + adcs x27, x28, x27 + umulh x28, x21, x12 + adcs x26, x28, x26 + umulh x28, x21, x11 + adcs x25, x28, x25 + umulh x28, x21, x10 + adcs x24, x28, x24 + umulh x28, x21, x9 + adcs x22, x28, x22 + umulh x28, x21, x8 + mul x21, x21, x14 + adcs x28, x28, xzr + cmn x21, x16 + adcs x16, x23, x1 + adcs x1, x27, x19 + mul x19, x16, x15 + adcs x20, x26, x20 + mul x21, x19, x8 + mul x23, x19, x9 + mul x26, x19, x10 + adcs x6, x25, x6 + mul x25, x19, x11 + adcs x7, x24, x7 + mul x24, x19, x12 + adcs x4, x22, x4 + mul x22, x19, x13 + adcs x5, x28, x5 + umulh x27, x19, x14 + adcs x2, x2, xzr + adcs x3, x3, xzr + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x28, x29, xzr + adds x22, x27, x22 + umulh x27, x19, x13 + adcs x24, x27, x24 + umulh x27, x19, x12 + adcs x25, x27, x25 + umulh x27, x19, x11 + adcs x26, x27, x26 + umulh x27, x19, x10 + adcs x23, x27, x23 + umulh x27, x19, x9 + adcs x21, x27, x21 + umulh x27, x19, x8 + mul x19, x19, x14 + adcs x27, x27, 
xzr + cmn x19, x16 + adcs x16, x22, x1 + adcs x1, x24, x20 + mul x19, x16, x15 + adcs x6, x25, x6 + mul x20, x19, x8 + mul x22, x19, x9 + mul x24, x19, x10 + adcs x7, x26, x7 + mul x25, x19, x11 + adcs x4, x23, x4 + mul x23, x19, x12 + adcs x5, x21, x5 + mul x21, x19, x13 + adcs x2, x27, x2 + umulh x26, x19, x14 + adcs x3, x3, xzr + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x27, x28, xzr + adds x21, x26, x21 + umulh x26, x19, x13 + adcs x23, x26, x23 + umulh x26, x19, x12 + adcs x25, x26, x25 + umulh x26, x19, x11 + adcs x24, x26, x24 + umulh x26, x19, x10 + adcs x22, x26, x22 + umulh x26, x19, x9 + adcs x20, x26, x20 + umulh x26, x19, x8 + mul x19, x19, x14 + adcs x26, x26, xzr + cmn x19, x16 + adcs x16, x21, x1 + adcs x1, x23, x6 + mul x6, x16, x15 + adcs x7, x25, x7 + mul x19, x6, x8 + mul x21, x6, x9 + mul x23, x6, x10 + adcs x4, x24, x4 + mul x24, x6, x11 + adcs x5, x22, x5 + mul x22, x6, x12 + adcs x2, x20, x2 + mul x20, x6, x13 + adcs x3, x26, x3 + umulh x25, x6, x14 + adcs x17, x17, xzr + adcs x18, x18, xzr + adcs x26, x27, xzr + adds x20, x25, x20 + umulh x25, x6, x13 + adcs x22, x25, x22 + umulh x25, x6, x12 + adcs x24, x25, x24 + umulh x25, x6, x11 + adcs x23, x25, x23 + umulh x25, x6, x10 + adcs x21, x25, x21 + umulh x25, x6, x9 + adcs x19, x25, x19 + umulh x25, x6, x8 + mul x6, x6, x14 + adcs x25, x25, xzr + cmn x6, x16 + adcs x16, x20, x1 + adcs x1, x22, x7 + mul x15, x16, x15 + adcs x4, x24, x4 + mul x6, x15, x8 + mul x7, x15, x9 + mul x20, x15, x10 + adcs x5, x23, x5 + mul x22, x15, x11 + adcs x2, x21, x2 + mul x21, x15, x12 + adcs x3, x19, x3 + mul x19, x15, x13 + adcs x17, x25, x17 + umulh x23, x15, x14 + adcs x18, x18, xzr + adcs x24, x26, xzr + adds x19, x23, x19 + umulh x23, x15, x13 + adcs x21, x23, x21 + umulh x23, x15, x12 + adcs x22, x23, x22 + umulh x23, x15, x11 + adcs x20, x23, x20 + umulh x23, x15, x10 + adcs x7, x23, x7 + umulh x23, x15, x9 + adcs x6, x23, x6 + umulh x23, x15, x8 + mul x15, x15, x14 + adcs x23, x23, xzr + cmn x15, x16 + adcs x15, x19, x1 + adcs x16, x21, x4 + adcs x1, x22, x5 + adcs x2, x20, x2 + adcs x3, x7, x3 + adcs x17, x6, x17 + adcs x18, x23, x18 + adcs x4, x24, xzr + subs x14, x15, x14 + sbcs x13, x16, x13 + sbcs x12, x1, x12 + sbcs x11, x2, x11 + sbcs x10, x3, x10 + sbcs x9, x17, x9 + sbcs x8, x18, x8 + sbcs x4, x4, xzr + tst x4, #0x1 + csel x14, x15, x14, ne + csel x13, x16, x13, ne + csel x12, x1, x12, ne + csel x11, x2, x11, ne + csel x10, x3, x10, ne + csel x9, x17, x9, ne + csel x8, x18, x8, ne + stp x14, x13, [x0] + stp x12, x11, [x0, #16] + stp x10, x9, [x0, #32] + str x8, [x0, #48] + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end100: + .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L + + .globl mcl_fp_addPre7L + .align 2 + .type mcl_fp_addPre7L,@function +mcl_fp_addPre7L: // @mcl_fp_addPre7L +// BB#0: + ldp x11, x8, [x2, #40] + ldp x13, x9, [x1, #40] + ldp x15, x10, [x2, #24] + ldp x17, x14, [x2, #8] + ldr x16, [x2] + ldp x18, x2, [x1] + ldr x3, [x1, #16] + ldp x1, x12, [x1, #24] + adds x16, x16, x18 + str x16, [x0] + adcs x16, x17, x2 + adcs x14, x14, x3 + stp x16, x14, [x0, #8] + adcs x14, x15, x1 + adcs x10, x10, x12 + stp x14, x10, [x0, #24] + adcs x10, x11, x13 + adcs x9, x8, x9 + adcs x8, xzr, xzr + stp x10, x9, [x0, #40] + mov x0, x8 + ret +.Lfunc_end101: + .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L + + .globl mcl_fp_subPre7L + .align 2 + .type mcl_fp_subPre7L,@function 
+mcl_fp_subPre7L: // @mcl_fp_subPre7L +// BB#0: + ldp x11, x8, [x2, #40] + ldp x13, x9, [x1, #40] + ldp x15, x10, [x2, #24] + ldp x17, x14, [x2, #8] + ldr x16, [x2] + ldp x18, x2, [x1] + ldr x3, [x1, #16] + ldp x1, x12, [x1, #24] + subs x16, x18, x16 + str x16, [x0] + sbcs x16, x2, x17 + sbcs x14, x3, x14 + stp x16, x14, [x0, #8] + sbcs x14, x1, x15 + sbcs x10, x12, x10 + stp x14, x10, [x0, #24] + sbcs x10, x13, x11 + sbcs x9, x9, x8 + ngcs x8, xzr + and x8, x8, #0x1 + stp x10, x9, [x0, #40] + mov x0, x8 + ret +.Lfunc_end102: + .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L + + .globl mcl_fp_shr1_7L + .align 2 + .type mcl_fp_shr1_7L,@function +mcl_fp_shr1_7L: // @mcl_fp_shr1_7L +// BB#0: + ldp x8, x9, [x1] + ldp x14, x10, [x1, #40] + ldp x11, x12, [x1, #16] + ldr x13, [x1, #32] + extr x8, x9, x8, #1 + extr x9, x11, x9, #1 + extr x11, x12, x11, #1 + extr x12, x13, x12, #1 + extr x13, x14, x13, #1 + extr x14, x10, x14, #1 + lsr x10, x10, #1 + stp x8, x9, [x0] + stp x11, x12, [x0, #16] + stp x13, x14, [x0, #32] + str x10, [x0, #48] + ret +.Lfunc_end103: + .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L + + .globl mcl_fp_add7L + .align 2 + .type mcl_fp_add7L,@function +mcl_fp_add7L: // @mcl_fp_add7L +// BB#0: + ldp x11, x8, [x2, #40] + ldp x13, x9, [x1, #40] + ldp x15, x10, [x2, #24] + ldp x17, x14, [x2, #8] + ldr x16, [x2] + ldp x18, x2, [x1] + ldr x4, [x1, #16] + ldp x1, x12, [x1, #24] + adds x16, x16, x18 + ldp x5, x18, [x3, #40] + adcs x17, x17, x2 + adcs x2, x14, x4 + ldr x4, [x3, #32] + adcs x15, x15, x1 + adcs x10, x10, x12 + ldp x12, x1, [x3] + stp x16, x17, [x0] + stp x2, x15, [x0, #16] + adcs x6, x11, x13 + stp x10, x6, [x0, #32] + adcs x8, x8, x9 + str x8, [x0, #48] + adcs x7, xzr, xzr + ldp x9, x11, [x3, #16] + subs x14, x16, x12 + sbcs x13, x17, x1 + sbcs x12, x2, x9 + sbcs x11, x15, x11 + sbcs x10, x10, x4 + sbcs x9, x6, x5 + sbcs x8, x8, x18 + sbcs x15, x7, xzr + and w15, w15, #0x1 + tbnz w15, #0, .LBB104_2 +// BB#1: // %nocarry + stp x14, x13, [x0] + stp x12, x11, [x0, #16] + stp x10, x9, [x0, #32] + str x8, [x0, #48] +.LBB104_2: // %carry + ret +.Lfunc_end104: + .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L + + .globl mcl_fp_addNF7L + .align 2 + .type mcl_fp_addNF7L,@function +mcl_fp_addNF7L: // @mcl_fp_addNF7L +// BB#0: + ldp x11, x8, [x1, #40] + ldp x13, x9, [x2, #40] + ldp x15, x10, [x1, #24] + ldp x17, x14, [x1, #8] + ldr x16, [x1] + ldp x18, x1, [x2] + ldr x4, [x2, #16] + ldp x2, x12, [x2, #24] + adds x16, x18, x16 + adcs x17, x1, x17 + adcs x14, x4, x14 + ldp x4, x18, [x3, #40] + adcs x15, x2, x15 + adcs x10, x12, x10 + ldp x12, x2, [x3] + adcs x11, x13, x11 + ldr x13, [x3, #16] + ldp x3, x1, [x3, #24] + adcs x8, x9, x8 + subs x9, x16, x12 + sbcs x12, x17, x2 + sbcs x13, x14, x13 + sbcs x2, x15, x3 + sbcs x1, x10, x1 + sbcs x3, x11, x4 + sbcs x18, x8, x18 + asr x4, x18, #63 + cmp x4, #0 // =0 + csel x9, x16, x9, lt + csel x12, x17, x12, lt + csel x13, x14, x13, lt + csel x14, x15, x2, lt + csel x10, x10, x1, lt + csel x11, x11, x3, lt + csel x8, x8, x18, lt + stp x9, x12, [x0] + stp x13, x14, [x0, #16] + stp x10, x11, [x0, #32] + str x8, [x0, #48] + ret +.Lfunc_end105: + .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L + + .globl mcl_fp_sub7L + .align 2 + .type mcl_fp_sub7L,@function +mcl_fp_sub7L: // @mcl_fp_sub7L +// BB#0: + ldp x13, x14, [x2, #40] + ldp x17, x15, [x1, #40] + ldp x11, x12, [x2, #24] + ldp x9, x10, [x2, #8] + ldr x8, [x2] + ldp x18, x2, [x1] + ldr x4, [x1, #16] + ldp x1, x16, [x1, #24] + subs x8, x18, x8 + sbcs x9, x2, x9 + stp x8, x9, [x0] 
+ sbcs x10, x4, x10 + sbcs x11, x1, x11 + stp x10, x11, [x0, #16] + sbcs x12, x16, x12 + sbcs x13, x17, x13 + stp x12, x13, [x0, #32] + sbcs x14, x15, x14 + str x14, [x0, #48] + ngcs x15, xzr + and w15, w15, #0x1 + tbnz w15, #0, .LBB106_2 +// BB#1: // %nocarry + ret +.LBB106_2: // %carry + ldp x16, x17, [x3] + ldp x18, x1, [x3, #16] + ldr x2, [x3, #32] + ldp x3, x15, [x3, #40] + adds x8, x16, x8 + adcs x9, x17, x9 + adcs x10, x18, x10 + adcs x11, x1, x11 + adcs x12, x2, x12 + adcs x13, x3, x13 + adcs x14, x15, x14 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + str x14, [x0, #48] + ret +.Lfunc_end106: + .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L + + .globl mcl_fp_subNF7L + .align 2 + .type mcl_fp_subNF7L,@function +mcl_fp_subNF7L: // @mcl_fp_subNF7L +// BB#0: + ldp x11, x8, [x2, #40] + ldp x13, x9, [x1, #40] + ldp x15, x10, [x2, #24] + ldp x17, x14, [x2, #8] + ldr x16, [x2] + ldp x18, x2, [x1] + ldr x4, [x1, #16] + ldp x1, x12, [x1, #24] + subs x16, x18, x16 + sbcs x17, x2, x17 + sbcs x14, x4, x14 + ldp x4, x18, [x3, #40] + sbcs x15, x1, x15 + sbcs x10, x12, x10 + ldp x12, x1, [x3] + sbcs x11, x13, x11 + ldr x13, [x3, #16] + ldp x3, x2, [x3, #24] + sbcs x8, x9, x8 + asr x9, x8, #63 + and x1, x9, x1 + and x13, x9, x13 + and x3, x9, x3 + and x2, x9, x2 + and x4, x9, x4 + and x18, x9, x18 + extr x9, x9, x8, #63 + and x9, x9, x12 + adds x9, x9, x16 + str x9, [x0] + adcs x9, x1, x17 + str x9, [x0, #8] + adcs x9, x13, x14 + str x9, [x0, #16] + adcs x9, x3, x15 + str x9, [x0, #24] + adcs x9, x2, x10 + str x9, [x0, #32] + adcs x9, x4, x11 + adcs x8, x18, x8 + stp x9, x8, [x0, #40] + ret +.Lfunc_end107: + .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L + + .globl mcl_fpDbl_add7L + .align 2 + .type mcl_fpDbl_add7L,@function +mcl_fpDbl_add7L: // @mcl_fpDbl_add7L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + ldp x8, x9, [x2, #96] + ldp x10, x11, [x1, #96] + ldp x12, x13, [x2, #80] + ldp x14, x15, [x1, #80] + ldp x16, x17, [x2, #64] + ldp x18, x4, [x1, #64] + ldp x5, x6, [x2, #48] + ldp x7, x19, [x1, #48] + ldp x20, x21, [x2, #32] + ldp x22, x23, [x1, #32] + ldp x24, x25, [x2, #16] + ldp x27, x2, [x2] + ldp x28, x29, [x1, #16] + ldp x26, x1, [x1] + adds x26, x27, x26 + ldr x27, [x3, #48] + str x26, [x0] + adcs x1, x2, x1 + ldp x2, x26, [x3, #32] + str x1, [x0, #8] + adcs x1, x24, x28 + ldp x24, x28, [x3, #16] + str x1, [x0, #16] + ldp x1, x3, [x3] + adcs x25, x25, x29 + adcs x20, x20, x22 + stp x25, x20, [x0, #24] + adcs x20, x21, x23 + adcs x5, x5, x7 + stp x20, x5, [x0, #40] + adcs x5, x6, x19 + adcs x16, x16, x18 + adcs x17, x17, x4 + adcs x12, x12, x14 + adcs x13, x13, x15 + adcs x8, x8, x10 + adcs x9, x9, x11 + adcs x10, xzr, xzr + subs x11, x5, x1 + sbcs x14, x16, x3 + sbcs x15, x17, x24 + sbcs x18, x12, x28 + sbcs x1, x13, x2 + sbcs x2, x8, x26 + sbcs x3, x9, x27 + sbcs x10, x10, xzr + tst x10, #0x1 + csel x10, x5, x11, ne + csel x11, x16, x14, ne + csel x14, x17, x15, ne + csel x12, x12, x18, ne + csel x13, x13, x1, ne + csel x8, x8, x2, ne + csel x9, x9, x3, ne + stp x10, x11, [x0, #56] + stp x14, x12, [x0, #72] + stp x13, x8, [x0, #88] + str x9, [x0, #104] + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end108: + .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L + + .globl mcl_fpDbl_sub7L + .align 2 + .type mcl_fpDbl_sub7L,@function +mcl_fpDbl_sub7L: // @mcl_fpDbl_sub7L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + ldp x9, x8, [x2, #96] + ldp x11, x10, [x1, #96] + ldp x12, x13, [x2, #80] + ldp x14, x15, [x1, #80] + ldp x16, x17, [x2, #64] + ldp x18, x4, [x1, #64] + ldp x5, x6, [x2, #48] + ldp x7, x19, [x1, #48] + ldp x20, x21, [x2, #32] + ldp x22, x23, [x1, #32] + ldp x24, x25, [x2, #16] + ldp x26, x2, [x2] + ldp x28, x29, [x1, #16] + ldp x27, x1, [x1] + subs x26, x27, x26 + ldr x27, [x3, #48] + str x26, [x0] + sbcs x1, x1, x2 + ldp x2, x26, [x3, #32] + str x1, [x0, #8] + sbcs x1, x28, x24 + ldp x24, x28, [x3, #16] + str x1, [x0, #16] + ldp x1, x3, [x3] + sbcs x25, x29, x25 + sbcs x20, x22, x20 + stp x25, x20, [x0, #24] + sbcs x20, x23, x21 + sbcs x5, x7, x5 + stp x20, x5, [x0, #40] + sbcs x5, x19, x6 + sbcs x16, x18, x16 + sbcs x17, x4, x17 + sbcs x12, x14, x12 + sbcs x13, x15, x13 + sbcs x9, x11, x9 + sbcs x8, x10, x8 + ngcs x10, xzr + tst x10, #0x1 + csel x10, x27, xzr, ne + csel x11, x26, xzr, ne + csel x14, x2, xzr, ne + csel x15, x28, xzr, ne + csel x18, x24, xzr, ne + csel x2, x3, xzr, ne + csel x1, x1, xzr, ne + adds x1, x1, x5 + adcs x16, x2, x16 + stp x1, x16, [x0, #56] + adcs x16, x18, x17 + adcs x12, x15, x12 + stp x16, x12, [x0, #72] + adcs x12, x14, x13 + adcs x9, x11, x9 + stp x12, x9, [x0, #88] + adcs x8, x10, x8 + str x8, [x0, #104] + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end109: + .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L + + .align 2 + .type .LmulPv512x64,@function +.LmulPv512x64: // @mulPv512x64 +// BB#0: + ldr x9, [x0] + mul x10, x9, x1 + str x10, [x8] + ldr x10, [x0, #8] + umulh x9, x9, x1 + mul x11, x10, x1 + adds x9, x9, x11 + str x9, [x8, #8] + ldr x9, [x0, #16] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #16] + ldr x10, [x0, #24] + umulh x9, x9, x1 + mul x11, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #24] + ldr x9, [x0, #32] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #32] + ldr x10, [x0, #40] + umulh x9, x9, x1 + mul x11, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #40] + ldr x9, [x0, #48] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #48] + ldr x10, [x0, #56] + umulh x9, x9, x1 + mul x11, x10, x1 + umulh x10, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #56] + adcs x9, x10, xzr + str x9, [x8, #64] + ret +.Lfunc_end110: + .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64 + + .globl mcl_fp_mulUnitPre8L + .align 2 + .type mcl_fp_mulUnitPre8L,@function +mcl_fp_mulUnitPre8L: // @mcl_fp_mulUnitPre8L +// BB#0: + stp x20, x19, [sp, #-32]! + stp x29, x30, [sp, #16] + add x29, sp, #16 // =16 + sub sp, sp, #80 // =80 + mov x19, x0 + mov x8, sp + mov x0, x1 + mov x1, x2 + bl .LmulPv512x64 + ldp x9, x8, [sp, #56] + ldp x11, x10, [sp, #40] + ldp x16, x12, [sp, #24] + ldp x13, x14, [sp] + ldr x15, [sp, #16] + stp x13, x14, [x19] + stp x15, x16, [x19, #16] + stp x12, x11, [x19, #32] + stp x10, x9, [x19, #48] + str x8, [x19, #64] + sub sp, x29, #16 // =16 + ldp x29, x30, [sp, #16] + ldp x20, x19, [sp], #32 + ret +.Lfunc_end111: + .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L + + .globl mcl_fpDbl_mulPre8L + .align 2 + .type mcl_fpDbl_mulPre8L,@function +mcl_fpDbl_mulPre8L: // @mcl_fpDbl_mulPre8L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #144 // =144 + mov x20, x2 + mov x21, x1 + mov x19, x0 + bl mcl_fpDbl_mulPre4L + add x0, x19, #64 // =64 + add x1, x21, #32 // =32 + add x2, x20, #32 // =32 + bl mcl_fpDbl_mulPre4L + ldp x8, x9, [x20, #48] + ldp x10, x11, [x20, #32] + ldp x12, x13, [x20] + ldp x14, x15, [x20, #16] + adds x18, x12, x10 + str x18, [sp, #8] // 8-byte Folded Spill + ldp x10, x12, [x21, #16] + ldp x16, x17, [x21, #48] + adcs x22, x13, x11 + ldp x11, x13, [x21] + adcs x23, x14, x8 + ldp x8, x14, [x21, #32] + stp x18, x22, [sp, #16] + adcs x21, x15, x9 + stp x23, x21, [sp, #32] + adcs x24, xzr, xzr + adds x25, x11, x8 + adcs x26, x13, x14 + stp x25, x26, [sp, #48] + adcs x27, x10, x16 + adcs x28, x12, x17 + stp x27, x28, [sp, #64] + adcs x20, xzr, xzr + add x0, sp, #80 // =80 + add x1, sp, #48 // =48 + add x2, sp, #16 // =16 + bl mcl_fpDbl_mulPre4L + cmp x24, #0 // =0 + csel x8, x28, xzr, ne + and x9, x24, x20 + ldp x11, x10, [sp, #128] + ldp x13, x12, [sp, #112] + ldp x14, x15, [x19, #48] + ldp x16, x17, [x19, #32] + ldp x18, x0, [x19, #16] + csel x1, x27, xzr, ne + csel x2, x26, xzr, ne + csel x3, x25, xzr, ne + cmp x20, #0 // =0 + ldp x4, x5, [x19] + csel x6, x21, xzr, ne + csel x7, x23, xzr, ne + csel x20, x22, xzr, ne + ldr x21, [sp, #8] // 8-byte Folded Reload + csel x21, x21, xzr, ne + adds x3, x21, x3 + adcs x2, x20, x2 + ldp x20, x21, [sp, #96] + adcs x1, x7, x1 + adcs x8, x6, x8 + adcs x6, xzr, xzr + adds x13, x3, x13 + ldp x3, x7, [sp, #80] + adcs x12, x2, x12 + adcs x11, x1, x11 + ldp x1, x2, [x19, #112] + adcs x8, x8, x10 + adcs x9, x6, x9 + ldp x10, x6, [x19, #96] + subs x3, x3, x4 + sbcs x4, x7, x5 + ldp x5, x7, [x19, #80] + sbcs x18, x20, x18 + sbcs x0, x21, x0 + ldp x20, x21, [x19, #64] + sbcs x13, x13, x16 + sbcs x12, x12, x17 + sbcs x11, x11, x14 + sbcs x8, x8, x15 + sbcs x9, x9, xzr + subs x3, x3, x20 + sbcs x4, x4, x21 + sbcs x18, x18, x5 + sbcs x0, x0, x7 + sbcs x13, x13, x10 + sbcs x12, x12, x6 + sbcs x11, x11, x1 + sbcs x8, x8, x2 + sbcs x9, x9, xzr + adds x16, x16, x3 + str x16, [x19, #32] + adcs x16, x17, x4 + adcs x14, x14, x18 + stp x16, x14, [x19, #40] + adcs x14, x15, x0 + adcs x13, x20, x13 + stp x14, x13, [x19, #56] + adcs x12, x21, x12 + adcs x11, x5, x11 + stp x12, x11, [x19, #72] + adcs x8, x7, x8 + str x8, [x19, #88] + adcs x8, x10, x9 + str x8, [x19, #96] + adcs x8, x6, xzr + str x8, [x19, #104] + adcs x8, x1, xzr + str x8, [x19, #112] + adcs x8, x2, xzr + str x8, [x19, #120] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end112: + .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L + + .globl mcl_fpDbl_sqrPre8L + .align 2 + .type mcl_fpDbl_sqrPre8L,@function +mcl_fpDbl_sqrPre8L: // @mcl_fpDbl_sqrPre8L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #128 // =128 + mov x20, x1 + mov x19, x0 + mov x2, x20 + bl mcl_fpDbl_mulPre4L + add x0, x19, #64 // =64 + add x1, x20, #32 // =32 + mov x2, x1 + bl mcl_fpDbl_mulPre4L + ldp x8, x9, [x20, #16] + ldp x10, x11, [x20, #32] + ldp x12, x13, [x20] + ldp x14, x15, [x20, #48] + adds x22, x12, x10 + adcs x23, x13, x11 + adcs x20, x8, x14 + adcs x21, x9, x15 + stp x22, x23, [sp, #32] + stp x22, x23, [sp] + stp x20, x21, [sp, #48] + stp x20, x21, [sp, #16] + adcs x24, xzr, xzr + add x0, sp, #64 // =64 + add x1, sp, #32 // =32 + mov x2, sp + bl mcl_fpDbl_mulPre4L + ldp x8, x9, [x19, #48] + ldp x10, x11, [x19] + ldp x12, x13, [sp, #64] + ldp x14, x15, [x19, #16] + ldp x16, x17, [sp, #80] + ldp x18, x0, [x19, #32] + subs x10, x12, x10 + ldp x1, x12, [sp, #96] + sbcs x11, x13, x11 + sbcs x14, x16, x14 + ldp x13, x16, [sp, #112] + sbcs x15, x17, x15 + sbcs x17, x1, x18 + ldp x1, x2, [x19, #64] + ldp x3, x4, [x19, #80] + ldp x5, x6, [x19, #96] + ldp x7, x25, [x19, #112] + lsr x26, x21, #63 + sbcs x12, x12, x0 + sbcs x13, x13, x8 + sbcs x16, x16, x9 + sbcs x27, x24, xzr + subs x10, x10, x1 + sbcs x11, x11, x2 + sbcs x14, x14, x3 + sbcs x15, x15, x4 + sbcs x17, x17, x5 + sbcs x12, x12, x6 + sbcs x13, x13, x7 + sbcs x16, x16, x25 + sbcs x27, x27, xzr + adds x22, x22, x22 + adcs x23, x23, x23 + adcs x20, x20, x20 + adcs x21, x21, x21 + cmp x24, #0 // =0 + csel x24, x26, xzr, ne + csel x21, x21, xzr, ne + csel x20, x20, xzr, ne + csel x23, x23, xzr, ne + csel x22, x22, xzr, ne + adds x17, x17, x22 + adcs x12, x12, x23 + adcs x13, x13, x20 + adcs x16, x16, x21 + adcs x20, x27, x24 + adds x10, x10, x18 + str x10, [x19, #32] + adcs x10, x11, x0 + adcs x8, x14, x8 + stp x10, x8, [x19, #40] + adcs x8, x15, x9 + str x8, [x19, #56] + adcs x8, x17, x1 + str x8, [x19, #64] + adcs x8, x12, x2 + str x8, [x19, #72] + adcs x8, x13, x3 + str x8, [x19, #80] + adcs x8, x16, x4 + str x8, [x19, #88] + adcs x8, x20, x5 + str x8, [x19, #96] + adcs x8, x6, xzr + str x8, [x19, #104] + adcs x8, x7, xzr + str x8, [x19, #112] + adcs x8, x25, xzr + str x8, [x19, #120] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end113: + .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L + + .globl mcl_fp_mont8L + .align 2 + .type mcl_fp_mont8L,@function +mcl_fp_mont8L: // @mcl_fp_mont8L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #1424 // =1424 + mov x20, x3 + mov x26, x2 + str x26, [sp, #120] // 8-byte Folded Spill + ldur x19, [x20, #-8] + str x19, [sp, #136] // 8-byte Folded Spill + ldr x9, [x26] + mov x27, x1 + str x27, [sp, #128] // 8-byte Folded Spill + str x0, [sp, #112] // 8-byte Folded Spill + sub x8, x29, #160 // =160 + mov x0, x27 + mov x1, x9 + bl .LmulPv512x64 + ldur x24, [x29, #-160] + ldur x8, [x29, #-96] + str x8, [sp, #104] // 8-byte Folded Spill + ldur x8, [x29, #-104] + str x8, [sp, #96] // 8-byte Folded Spill + ldur x8, [x29, #-112] + str x8, [sp, #88] // 8-byte Folded Spill + ldur x8, [x29, #-120] + str x8, [sp, #80] // 8-byte Folded Spill + ldur x8, [x29, #-128] + str x8, [sp, #72] // 8-byte Folded Spill + ldur x8, [x29, #-136] + str x8, [sp, #64] // 8-byte Folded Spill + ldur x8, [x29, #-144] + str x8, [sp, #56] // 8-byte Folded Spill + ldur x8, [x29, #-152] + str x8, [sp, #48] // 8-byte Folded Spill + mul x1, x24, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv512x64 + ldur x8, [x29, #-176] + str x8, [sp, #40] // 8-byte Folded Spill + ldur x8, [x29, #-184] + str x8, [sp, #32] // 8-byte Folded Spill + ldur x8, [x29, #-192] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x19, x28, [x29, #-208] + ldp x21, x23, [x29, #-224] + ldp x25, x22, [x29, #-240] + ldr x1, [x26, #8] + add x8, sp, #1184 // =1184 + mov x0, x27 + bl .LmulPv512x64 + cmn x25, x24 + ldr x8, [sp, #1248] + ldr x9, [sp, #1240] + ldp x10, x12, [sp, #48] + adcs x10, x22, x10 + ldr x11, [sp, #1232] + adcs x12, x21, x12 + ldr x13, [sp, #1224] + ldp x14, x16, [sp, #64] + adcs x14, x23, x14 + ldr x15, [sp, #1216] + adcs x16, x19, x16 + ldr x17, [sp, #1208] + ldp x18, x1, [sp, #80] + adcs x18, x28, x18 + ldr x0, [sp, #1200] + ldp x2, x4, [sp, #24] + adcs x1, x2, x1 + ldr x2, [sp, #1184] + ldp x3, x5, [sp, #96] + adcs x3, x4, x3 + ldr x4, [sp, #1192] + ldr x6, [sp, #40] // 8-byte Folded Reload + adcs x5, x6, x5 + adcs x6, xzr, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + ldr x22, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x22 + add x8, sp, #1104 // =1104 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #1168] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1160] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1152] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #1144] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x25, [sp, #1136] + ldr x26, [sp, #1128] + ldr x27, [sp, #1120] + ldr x21, [sp, #1112] + ldr x28, [sp, #1104] + ldp x24, x23, [sp, #120] + ldr x1, [x24, #16] + add x8, sp, #1024 // =1024 + mov x0, x23 + bl .LmulPv512x64 + cmn x19, x28 + ldr x8, [sp, #1088] + ldr x9, [sp, #1080] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldr x11, [sp, #1072] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + ldr x13, [sp, #1064] + adcs x14, x14, x26 + ldr x15, [sp, #1056] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + ldr x17, [sp, #1048] + ldp x0, x2, [sp, #8] + adcs x18, x18, x0 + ldr 
x0, [sp, #1040] + ldr x1, [sp, #56] // 8-byte Folded Reload + adcs x1, x1, x2 + ldr x2, [sp, #1024] + ldp x5, x3, [sp, #96] + ldp x4, x6, [sp, #24] + adcs x3, x3, x4 + ldr x4, [sp, #1032] + adcs x5, x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + mul x1, x19, x22 + add x8, sp, #944 // =944 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #1008] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1000] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #992] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #984] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x25, [sp, #976] + ldr x26, [sp, #968] + ldr x27, [sp, #960] + ldr x21, [sp, #952] + ldr x28, [sp, #944] + mov x22, x24 + ldr x1, [x22, #24] + add x8, sp, #864 // =864 + mov x0, x23 + bl .LmulPv512x64 + cmn x19, x28 + ldr x8, [sp, #928] + ldr x9, [sp, #920] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldr x11, [sp, #912] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + ldr x13, [sp, #904] + adcs x14, x14, x26 + ldr x15, [sp, #896] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + ldr x17, [sp, #888] + ldp x0, x2, [sp, #8] + adcs x18, x18, x0 + ldr x0, [sp, #880] + ldr x1, [sp, #56] // 8-byte Folded Reload + adcs x1, x1, x2 + ldr x2, [sp, #864] + ldp x5, x3, [sp, #96] + ldp x4, x6, [sp, #24] + adcs x3, x3, x4 + ldr x4, [sp, #872] + adcs x5, x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + ldr x23, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x23 + add x8, sp, #784 // =784 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #848] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #840] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #832] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x24, [sp, #824] + ldr x25, [sp, #816] + ldr x26, [sp, #808] + ldr x27, [sp, #800] + ldr x21, [sp, #792] + ldr x28, [sp, #784] + ldr x1, [x22, #32] + add x8, sp, #704 // =704 + ldr x22, [sp, #128] // 8-byte Folded Reload + mov x0, x22 + bl .LmulPv512x64 + cmn x19, x28 + ldr x8, [sp, #768] + ldr x9, [sp, #760] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldr x11, [sp, #752] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + ldr x13, [sp, #744] + adcs x14, x14, x26 + ldr x15, [sp, #736] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + ldr x17, [sp, #728] + adcs x18, x18, x24 + ldr x0, [sp, #720] + ldr x1, [sp, #56] // 8-byte Folded Reload + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #704] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #712] + ldr x6, [sp, #32] // 8-byte Folded Reload + adcs x5, 
x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + mul x1, x19, x23 + add x8, sp, #624 // =624 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #688] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #680] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #672] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x24, [sp, #664] + ldr x25, [sp, #656] + ldr x26, [sp, #648] + ldr x27, [sp, #640] + ldr x21, [sp, #632] + ldr x28, [sp, #624] + ldr x23, [sp, #120] // 8-byte Folded Reload + ldr x1, [x23, #40] + add x8, sp, #544 // =544 + mov x0, x22 + bl .LmulPv512x64 + cmn x19, x28 + ldr x8, [sp, #608] + ldr x9, [sp, #600] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldr x11, [sp, #592] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + ldr x13, [sp, #584] + adcs x14, x14, x26 + ldr x15, [sp, #576] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + ldr x17, [sp, #568] + adcs x18, x18, x24 + ldr x0, [sp, #560] + ldr x1, [sp, #56] // 8-byte Folded Reload + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #544] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #552] + ldr x6, [sp, #32] // 8-byte Folded Reload + adcs x5, x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + ldr x22, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x22 + add x8, sp, #464 // =464 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #528] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #520] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #512] + str x8, [sp, #16] // 8-byte Folded Spill + ldp x25, x24, [sp, #496] + ldp x27, x26, [sp, #480] + ldp x28, x21, [sp, #464] + ldr x1, [x23, #48] + add x8, sp, #384 // =384 + ldr x23, [sp, #128] // 8-byte Folded Reload + mov x0, x23 + bl .LmulPv512x64 + cmn x19, x28 + ldp x9, x8, [sp, #440] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldp x13, x11, [sp, #424] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + adcs x14, x14, x26 + ldp x17, x15, [sp, #408] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + adcs x18, x18, x24 + ldr x1, [sp, #56] // 8-byte Folded Reload + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #384] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldp x4, x0, [sp, #392] + ldr x6, [sp, #32] // 8-byte Folded Reload + adcs x5, x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x10, x12, x4 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x0 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x18, x15 + 
str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + adcs x8, x6, x8 + stp x8, x9, [sp, #96] + adcs x8, xzr, xzr + stp x8, x10, [sp, #48] + mul x1, x19, x22 + add x8, sp, #304 // =304 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #368] + str x8, [sp, #32] // 8-byte Folded Spill + ldp x22, x8, [sp, #352] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x25, x24, [sp, #336] + ldp x27, x26, [sp, #320] + ldp x28, x21, [sp, #304] + ldr x8, [sp, #120] // 8-byte Folded Reload + ldr x1, [x8, #56] + add x8, sp, #224 // =224 + mov x0, x23 + bl .LmulPv512x64 + cmn x19, x28 + ldp x9, x8, [sp, #280] + ldr x10, [sp, #40] // 8-byte Folded Reload + adcs x10, x10, x21 + ldp x13, x11, [sp, #264] + ldp x14, x12, [sp, #80] + adcs x12, x12, x27 + adcs x14, x14, x26 + ldp x17, x15, [sp, #248] + ldp x18, x16, [sp, #64] + adcs x16, x16, x25 + adcs x18, x18, x24 + ldr x1, [sp, #56] // 8-byte Folded Reload + adcs x1, x1, x22 + ldr x2, [sp, #224] + ldp x5, x3, [sp, #96] + ldp x4, x6, [sp, #24] + adcs x3, x3, x4 + ldp x4, x0, [sp, #232] + adcs x5, x5, x6 + ldr x6, [sp, #48] // 8-byte Folded Reload + adcs x6, x6, xzr + adds x19, x10, x2 + adcs x21, x12, x4 + adcs x22, x14, x0 + adcs x23, x16, x17 + adcs x24, x18, x15 + adcs x25, x1, x13 + adcs x10, x3, x11 + str x10, [sp, #128] // 8-byte Folded Spill + adcs x27, x5, x9 + adcs x28, x6, x8 + adcs x26, xzr, xzr + ldr x8, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x8 + add x8, sp, #144 // =144 + mov x0, x20 + bl .LmulPv512x64 + ldp x15, x8, [sp, #200] + ldp x9, x10, [sp, #144] + ldp x11, x12, [sp, #160] + cmn x19, x9 + ldp x13, x9, [sp, #176] + adcs x10, x21, x10 + ldr x14, [sp, #192] + adcs x11, x22, x11 + adcs x12, x23, x12 + adcs x13, x24, x13 + adcs x9, x25, x9 + ldp x16, x17, [x20, #48] + ldp x18, x0, [x20, #32] + ldp x1, x2, [x20, #16] + ldp x3, x4, [x20] + ldr x5, [sp, #128] // 8-byte Folded Reload + adcs x14, x5, x14 + adcs x15, x27, x15 + adcs x8, x28, x8 + adcs x5, x26, xzr + subs x3, x10, x3 + sbcs x4, x11, x4 + sbcs x1, x12, x1 + sbcs x2, x13, x2 + sbcs x18, x9, x18 + sbcs x0, x14, x0 + sbcs x16, x15, x16 + sbcs x17, x8, x17 + sbcs x5, x5, xzr + tst x5, #0x1 + csel x10, x10, x3, ne + csel x11, x11, x4, ne + csel x12, x12, x1, ne + csel x13, x13, x2, ne + csel x9, x9, x18, ne + csel x14, x14, x0, ne + csel x15, x15, x16, ne + csel x8, x8, x17, ne + ldr x16, [sp, #112] // 8-byte Folded Reload + stp x10, x11, [x16] + stp x12, x13, [x16, #16] + stp x9, x14, [x16, #32] + stp x15, x8, [x16, #48] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end114: + .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L + + .globl mcl_fp_montNF8L + .align 2 + .type mcl_fp_montNF8L,@function +mcl_fp_montNF8L: // @mcl_fp_montNF8L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #1424 // =1424 + mov x20, x3 + mov x26, x2 + str x26, [sp, #128] // 8-byte Folded Spill + ldur x19, [x20, #-8] + str x19, [sp, #136] // 8-byte Folded Spill + ldr x9, [x26] + mov x27, x1 + stp x0, x27, [sp, #112] + sub x8, x29, #160 // =160 + mov x0, x27 + mov x1, x9 + bl .LmulPv512x64 + ldur x24, [x29, #-160] + ldur x8, [x29, #-96] + str x8, [sp, #104] // 8-byte Folded Spill + ldur x8, [x29, #-104] + str x8, [sp, #96] // 8-byte Folded Spill + ldur x8, [x29, #-112] + str x8, [sp, #88] // 8-byte Folded Spill + ldur x8, [x29, #-120] + str x8, [sp, #80] // 8-byte Folded Spill + ldur x8, [x29, #-128] + str x8, [sp, #72] // 8-byte Folded Spill + ldur x8, [x29, #-136] + str x8, [sp, #64] // 8-byte Folded Spill + ldur x8, [x29, #-144] + str x8, [sp, #56] // 8-byte Folded Spill + ldur x8, [x29, #-152] + str x8, [sp, #48] // 8-byte Folded Spill + mul x1, x24, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv512x64 + ldur x8, [x29, #-176] + str x8, [sp, #40] // 8-byte Folded Spill + ldur x8, [x29, #-184] + str x8, [sp, #32] // 8-byte Folded Spill + ldur x8, [x29, #-192] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x19, x28, [x29, #-208] + ldp x21, x23, [x29, #-224] + ldp x25, x22, [x29, #-240] + ldr x1, [x26, #8] + add x8, sp, #1184 // =1184 + mov x0, x27 + bl .LmulPv512x64 + cmn x25, x24 + ldr x8, [sp, #1248] + ldr x9, [sp, #1240] + ldp x10, x12, [sp, #48] + adcs x10, x22, x10 + ldr x11, [sp, #1232] + adcs x12, x21, x12 + ldr x13, [sp, #1224] + ldp x14, x16, [sp, #64] + adcs x14, x23, x14 + ldr x15, [sp, #1216] + adcs x16, x19, x16 + ldr x17, [sp, #1208] + ldp x18, x1, [sp, #80] + adcs x18, x28, x18 + ldr x0, [sp, #1192] + ldp x2, x4, [sp, #24] + adcs x1, x2, x1 + ldr x2, [sp, #1184] + ldp x3, x5, [sp, #96] + adcs x3, x4, x3 + ldr x4, [sp, #1200] + ldr x6, [sp, #40] // 8-byte Folded Reload + adcs x5, x6, x5 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x27, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x27 + add x8, sp, #1104 // =1104 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #1168] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #1160] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1152] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1144] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x23, [sp, #1136] + ldr x24, [sp, #1128] + ldr x25, [sp, #1120] + ldr x21, [sp, #1112] + ldr x26, [sp, #1104] + ldp x22, x28, [sp, #120] + ldr x1, [x28, #16] + add x8, sp, #1024 // =1024 + mov x0, x22 + bl .LmulPv512x64 + cmn x19, x26 + ldr x8, [sp, #1088] + ldr x9, [sp, #1080] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldr x11, [sp, #1072] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + ldr x13, [sp, #1064] + adcs x14, x14, x24 + ldr x15, [sp, #1056] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + ldr x17, [sp, #1048] + ldp x0, x2, [sp, #16] + adcs x18, x18, x0 + ldr x0, [sp, #1032] + ldp x3, x1, [sp, #96] + adcs x1, x1, x2 + ldr x2, [sp, #1024] + 
ldp x4, x6, [sp, #32] + adcs x3, x3, x4 + ldr x4, [sp, #1040] + ldr x5, [sp, #88] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x27 + add x8, sp, #944 // =944 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #1008] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #1000] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #992] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #984] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x23, [sp, #976] + ldr x24, [sp, #968] + ldr x25, [sp, #960] + ldr x21, [sp, #952] + ldr x26, [sp, #944] + ldr x1, [x28, #24] + add x8, sp, #864 // =864 + mov x27, x22 + mov x0, x27 + bl .LmulPv512x64 + cmn x19, x26 + ldr x8, [sp, #928] + ldr x9, [sp, #920] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldr x11, [sp, #912] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + ldr x13, [sp, #904] + adcs x14, x14, x24 + ldr x15, [sp, #896] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + ldr x17, [sp, #888] + ldp x0, x2, [sp, #16] + adcs x18, x18, x0 + ldr x0, [sp, #872] + ldp x3, x1, [sp, #96] + adcs x1, x1, x2 + ldr x2, [sp, #864] + ldp x4, x6, [sp, #32] + adcs x3, x3, x4 + ldr x4, [sp, #880] + ldr x5, [sp, #88] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x28, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x28 + add x8, sp, #784 // =784 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #848] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #840] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #832] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #824] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x23, [sp, #816] + ldr x24, [sp, #808] + ldr x25, [sp, #800] + ldr x21, [sp, #792] + ldr x26, [sp, #784] + ldr x22, [sp, #128] // 8-byte Folded Reload + ldr x1, [x22, #32] + add x8, sp, #704 // =704 + mov x0, x27 + bl .LmulPv512x64 + cmn x19, x26 + ldr x8, [sp, #768] + ldr x9, [sp, #760] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldr x11, [sp, #752] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + ldr x13, [sp, #744] + adcs x14, x14, x24 + ldr x15, [sp, #736] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + ldr x17, [sp, #728] + ldp x0, x2, [sp, #16] + adcs x18, x18, x0 + ldr x0, [sp, #712] + ldp x3, x1, [sp, #96] + adcs x1, x1, x2 + ldr x2, [sp, #704] + ldp x4, x6, [sp, #32] + adcs x3, x3, x4 + ldr x4, [sp, #720] + ldr x5, [sp, #88] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str 
x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x28 + add x8, sp, #624 // =624 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #688] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #680] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #672] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #664] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x23, [sp, #656] + ldr x24, [sp, #648] + ldr x25, [sp, #640] + ldr x21, [sp, #632] + ldr x26, [sp, #624] + mov x27, x22 + ldr x1, [x27, #40] + add x8, sp, #544 // =544 + ldr x28, [sp, #120] // 8-byte Folded Reload + mov x0, x28 + bl .LmulPv512x64 + cmn x19, x26 + ldr x8, [sp, #608] + ldr x9, [sp, #600] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldr x11, [sp, #592] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + ldr x13, [sp, #584] + adcs x14, x14, x24 + ldr x15, [sp, #576] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + ldr x17, [sp, #568] + ldp x0, x2, [sp, #16] + adcs x18, x18, x0 + ldr x0, [sp, #552] + ldp x3, x1, [sp, #96] + adcs x1, x1, x2 + ldr x2, [sp, #544] + ldp x4, x6, [sp, #32] + adcs x3, x3, x4 + ldr x4, [sp, #560] + ldr x5, [sp, #88] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x22, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x22 + add x8, sp, #464 // =464 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #528] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #520] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #512] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x23, x8, [sp, #496] + str x8, [sp, #16] // 8-byte Folded Spill + ldp x25, x24, [sp, #480] + ldp x26, x21, [sp, #464] + ldr x1, [x27, #48] + add x8, sp, #384 // =384 + mov x0, x28 + bl .LmulPv512x64 + cmn x19, x26 + ldp x9, x8, [sp, #440] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldp x13, x11, [sp, #424] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + adcs x14, x14, x24 + ldp x17, x15, [sp, #408] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + ldp x0, x2, [sp, #16] + adcs x18, x18, x0 + ldp x3, x1, [sp, #96] + adcs x1, x1, x2 + ldp x2, x0, [sp, #384] + ldp x4, x6, [sp, #32] + adcs x3, x3, x4 + ldr x4, [sp, #400] + ldr x5, [sp, #88] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x10, x12, x0 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x4 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x17 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x15 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x13 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x11 + adcs x9, x5, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x22 + add x8, sp, #304 // =304 + mov x0, x20 + bl .LmulPv512x64 + ldp x27, x8, [sp, #360] + str x8, [sp, #40] // 
8-byte Folded Spill + ldp x22, x28, [sp, #344] + ldp x24, x23, [sp, #328] + ldp x21, x25, [sp, #312] + ldr x26, [sp, #304] + ldp x0, x8, [sp, #120] + ldr x1, [x8, #56] + add x8, sp, #224 // =224 + bl .LmulPv512x64 + cmn x19, x26 + ldp x9, x8, [sp, #280] + ldp x10, x18, [sp, #48] + adcs x10, x10, x21 + ldp x13, x11, [sp, #264] + ldp x14, x12, [sp, #72] + adcs x12, x12, x25 + adcs x14, x14, x24 + ldp x17, x15, [sp, #248] + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x16, x16, x23 + adcs x18, x18, x22 + ldp x2, x0, [sp, #224] + ldp x3, x1, [sp, #96] + adcs x1, x1, x28 + adcs x3, x3, x27 + ldr x4, [sp, #240] + ldr x5, [sp, #88] // 8-byte Folded Reload + ldr x6, [sp, #40] // 8-byte Folded Reload + adcs x5, x5, x6 + adds x19, x10, x2 + adcs x21, x12, x0 + adcs x22, x14, x4 + adcs x23, x16, x17 + adcs x24, x18, x15 + adcs x25, x1, x13 + adcs x26, x3, x11 + adcs x27, x5, x9 + adcs x28, x8, xzr + ldr x8, [sp, #136] // 8-byte Folded Reload + mul x1, x19, x8 + add x8, sp, #144 // =144 + mov x0, x20 + bl .LmulPv512x64 + ldp x15, x8, [sp, #200] + ldp x9, x10, [sp, #144] + ldp x11, x12, [sp, #160] + cmn x19, x9 + ldp x13, x9, [sp, #176] + adcs x10, x21, x10 + ldr x14, [sp, #192] + adcs x11, x22, x11 + adcs x12, x23, x12 + adcs x13, x24, x13 + adcs x9, x25, x9 + ldp x16, x17, [x20, #48] + ldp x18, x0, [x20, #32] + ldp x1, x2, [x20, #16] + ldp x3, x4, [x20] + adcs x14, x26, x14 + adcs x15, x27, x15 + adcs x8, x28, x8 + subs x3, x10, x3 + sbcs x4, x11, x4 + sbcs x1, x12, x1 + sbcs x2, x13, x2 + sbcs x18, x9, x18 + sbcs x0, x14, x0 + sbcs x16, x15, x16 + sbcs x17, x8, x17 + cmp x17, #0 // =0 + csel x10, x10, x3, lt + csel x11, x11, x4, lt + csel x12, x12, x1, lt + csel x13, x13, x2, lt + csel x9, x9, x18, lt + csel x14, x14, x0, lt + csel x15, x15, x16, lt + csel x8, x8, x17, lt + ldr x16, [sp, #112] // 8-byte Folded Reload + stp x10, x11, [x16] + stp x12, x13, [x16, #16] + stp x9, x14, [x16, #32] + stp x15, x8, [x16, #48] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end115: + .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L + + .globl mcl_fp_montRed8L + .align 2 + .type mcl_fp_montRed8L,@function +mcl_fp_montRed8L: // @mcl_fp_montRed8L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #800 // =800 + mov x20, x2 + ldur x9, [x20, #-8] + str x9, [sp, #32] // 8-byte Folded Spill + ldr x8, [x20, #48] + str x8, [sp, #144] // 8-byte Folded Spill + ldr x8, [x20, #56] + str x8, [sp, #152] // 8-byte Folded Spill + ldr x8, [x20, #32] + str x8, [sp, #120] // 8-byte Folded Spill + ldr x8, [x20, #40] + str x8, [sp, #128] // 8-byte Folded Spill + ldr x8, [x20, #16] + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [x20, #24] + str x8, [sp, #112] // 8-byte Folded Spill + ldr x8, [x20] + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [x20, #8] + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [x1, #112] + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [x1, #120] + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [x1, #96] + str x8, [sp, #56] // 8-byte Folded Spill + ldr x8, [x1, #104] + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [x1, #80] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [x1, #88] + str x8, [sp, #48] // 8-byte Folded Spill + ldp x28, x8, [x1, #64] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x22, x25, [x1, #48] + ldp x24, x19, [x1, #32] + ldp x27, x26, [x1, #16] + ldp x21, x23, [x1] + str x0, [sp, #136] // 8-byte Folded Spill + mul x1, x21, x9 + sub x8, x29, #160 // =160 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [x29, #-104] + ldp x11, x10, [x29, #-120] + ldp x16, x12, [x29, #-136] + ldp x13, x14, [x29, #-160] + ldur x15, [x29, #-144] + cmn x21, x13 + adcs x21, x23, x14 + adcs x13, x27, x15 + adcs x26, x26, x16 + adcs x24, x24, x12 + adcs x11, x19, x11 + stp x11, x13, [sp, #8] + adcs x22, x22, x10 + adcs x25, x25, x9 + adcs x27, x28, x8 + ldr x8, [sp, #24] // 8-byte Folded Reload + adcs x28, x8, xzr + ldp x19, x8, [sp, #32] + adcs x23, x8, xzr + ldr x8, [sp, #48] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #48] // 8-byte Folded Spill + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #56] // 8-byte Folded Spill + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + adcs x8, xzr, xzr + str x8, [sp, #40] // 8-byte Folded Spill + mul x1, x21, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [x29, #-184] + ldp x11, x10, [x29, #-200] + ldp x16, x12, [x29, #-216] + ldp x13, x14, [x29, #-240] + ldur x15, [x29, #-224] + cmn x21, x13 + ldr x13, [sp, #16] // 8-byte Folded Reload + adcs x21, x13, x14 + adcs x13, x26, x15 + str x13, [sp, #24] // 8-byte Folded Spill + adcs x24, x24, x16 + ldr x13, [sp, #8] // 8-byte Folded Reload + adcs x12, x13, x12 + str x12, [sp, #16] // 8-byte Folded Spill + adcs x22, x22, x11 + adcs x25, x25, x10 + adcs x27, x27, x9 + adcs x28, x28, x8 + adcs x23, x23, xzr + ldr x8, [sp, #48] // 8-byte Folded Reload + adcs x26, x8, xzr + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #56] // 8-byte Folded Spill + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] 
// 8-byte Folded Spill + ldr x8, [sp, #40] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #48] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #560 // =560 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #624] + ldr x9, [sp, #616] + ldr x10, [sp, #608] + ldr x11, [sp, #600] + ldr x12, [sp, #592] + ldr x13, [sp, #560] + ldr x14, [sp, #568] + ldr x15, [sp, #576] + ldr x16, [sp, #584] + cmn x21, x13 + ldr x13, [sp, #24] // 8-byte Folded Reload + adcs x21, x13, x14 + adcs x13, x24, x15 + str x13, [sp, #40] // 8-byte Folded Spill + ldr x13, [sp, #16] // 8-byte Folded Reload + adcs x13, x13, x16 + str x13, [sp, #24] // 8-byte Folded Spill + adcs x22, x22, x12 + adcs x25, x25, x11 + adcs x27, x27, x10 + adcs x28, x28, x9 + adcs x23, x23, x8 + adcs x26, x26, xzr + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x24, x8, xzr + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #48] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #56] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #480 // =480 + mov x0, x20 + bl .LmulPv512x64 + ldr x8, [sp, #544] + ldr x9, [sp, #536] + ldr x10, [sp, #528] + ldr x11, [sp, #520] + ldr x12, [sp, #512] + ldp x13, x14, [sp, #480] + ldp x15, x16, [sp, #496] + cmn x21, x13 + ldr x13, [sp, #40] // 8-byte Folded Reload + adcs x21, x13, x14 + ldr x13, [sp, #24] // 8-byte Folded Reload + adcs x13, x13, x15 + adcs x22, x22, x16 + adcs x25, x25, x12 + adcs x27, x27, x11 + adcs x28, x28, x10 + adcs x23, x23, x9 + adcs x26, x26, x8 + adcs x24, x24, xzr + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + stp x13, x8, [sp, #48] + mul x1, x21, x19 + add x8, sp, #400 // =400 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [sp, #456] + ldp x11, x10, [sp, #440] + ldp x16, x12, [sp, #424] + ldp x13, x14, [sp, #400] + ldr x15, [sp, #416] + cmn x21, x13 + ldr x13, [sp, #48] // 8-byte Folded Reload + adcs x21, x13, x14 + adcs x13, x22, x15 + str x13, [sp, #48] // 8-byte Folded Spill + adcs x25, x25, x16 + adcs x27, x27, x12 + adcs x28, x28, x11 + adcs x23, x23, x10 + adcs x26, x26, x9 + adcs x24, x24, x8 + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x22, x8, xzr + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #320 // =320 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [sp, #376] + ldp x11, x10, [sp, #360] + ldp x16, x12, [sp, #344] + ldp x13, x14, [sp, #320] + ldr x15, [sp, #336] + cmn x21, x13 + ldr x13, [sp, #48] // 8-byte Folded Reload + adcs x21, x13, x14 + adcs x13, x25, x15 + adcs x27, x27, x16 + adcs x28, x28, x12 + adcs x23, x23, x11 + adcs x26, x26, x10 + adcs x24, x24, x9 + ldr x9, [sp, #64] // 8-byte Folded Reload + adcs x8, x9, x8 + stp 
x13, x8, [sp, #56] + adcs x22, x22, xzr + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x25, x8, xzr + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #240 // =240 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [sp, #296] + ldp x11, x10, [sp, #280] + ldp x16, x12, [sp, #264] + ldp x13, x14, [sp, #240] + ldr x15, [sp, #256] + cmn x21, x13 + ldr x13, [sp, #56] // 8-byte Folded Reload + adcs x21, x13, x14 + adcs x13, x27, x15 + adcs x28, x28, x16 + adcs x23, x23, x12 + adcs x26, x26, x11 + adcs x24, x24, x10 + ldr x10, [sp, #64] // 8-byte Folded Reload + adcs x9, x10, x9 + stp x9, x13, [sp, #64] + adcs x22, x22, x8 + adcs x25, x25, xzr + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x27, x8, xzr + mul x1, x21, x19 + add x8, sp, #160 // =160 + mov x0, x20 + bl .LmulPv512x64 + ldp x9, x8, [sp, #216] + ldp x11, x10, [sp, #200] + ldp x16, x12, [sp, #184] + ldp x13, x14, [sp, #160] + ldr x15, [sp, #176] + cmn x21, x13 + ldr x13, [sp, #72] // 8-byte Folded Reload + adcs x13, x13, x14 + adcs x14, x28, x15 + adcs x15, x23, x16 + adcs x12, x26, x12 + adcs x11, x24, x11 + ldr x16, [sp, #64] // 8-byte Folded Reload + adcs x10, x16, x10 + adcs x9, x22, x9 + adcs x8, x25, x8 + adcs x16, x27, xzr + ldp x17, x18, [sp, #88] + subs x17, x13, x17 + sbcs x18, x14, x18 + ldp x0, x1, [sp, #104] + sbcs x0, x15, x0 + sbcs x1, x12, x1 + ldp x2, x3, [sp, #120] + sbcs x2, x11, x2 + sbcs x3, x10, x3 + ldp x4, x5, [sp, #144] + sbcs x4, x9, x4 + sbcs x5, x8, x5 + sbcs x16, x16, xzr + tst x16, #0x1 + csel x13, x13, x17, ne + csel x14, x14, x18, ne + csel x15, x15, x0, ne + csel x12, x12, x1, ne + csel x11, x11, x2, ne + csel x10, x10, x3, ne + csel x9, x9, x4, ne + csel x8, x8, x5, ne + ldr x16, [sp, #136] // 8-byte Folded Reload + stp x13, x14, [x16] + stp x15, x12, [x16, #16] + stp x11, x10, [x16, #32] + stp x9, x8, [x16, #48] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end116: + .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L + + .globl mcl_fp_addPre8L + .align 2 + .type mcl_fp_addPre8L,@function +mcl_fp_addPre8L: // @mcl_fp_addPre8L +// BB#0: + ldp x8, x9, [x2, #48] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x16, x17, [x2, #16] + ldp x18, x2, [x2] + ldp x3, x4, [x1] + ldp x5, x1, [x1, #16] + adds x18, x18, x3 + str x18, [x0] + adcs x18, x2, x4 + adcs x16, x16, x5 + stp x18, x16, [x0, #8] + adcs x16, x17, x1 + adcs x12, x12, x14 + stp x16, x12, [x0, #24] + adcs x12, x13, x15 + adcs x8, x8, x10 + stp x12, x8, [x0, #40] + adcs x9, x9, x11 + adcs x8, xzr, xzr + str x9, [x0, #56] + mov x0, x8 + ret +.Lfunc_end117: + .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L + + .globl mcl_fp_subPre8L + .align 2 + .type mcl_fp_subPre8L,@function +mcl_fp_subPre8L: // @mcl_fp_subPre8L +// BB#0: + ldp x8, x9, [x2, #48] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x16, x17, [x2, #16] + ldp x18, x2, [x2] + ldp x3, x4, [x1] + ldp x5, x1, [x1, #16] + subs x18, x3, x18 + str x18, [x0] + sbcs x18, x4, x2 + sbcs x16, x5, x16 + stp x18, x16, [x0, #8] + sbcs x16, x1, x17 + sbcs x12, x14, x12 + stp x16, x12, [x0, #24] + sbcs x12, x15, x13 + sbcs x8, x10, x8 + stp x12, x8, [x0, #40] + sbcs x9, x11, x9 + ngcs x8, xzr + and x8, x8, #0x1 + str x9, [x0, #56] + mov x0, x8 + ret +.Lfunc_end118: + .size mcl_fp_subPre8L, 
.Lfunc_end118-mcl_fp_subPre8L + + .globl mcl_fp_shr1_8L + .align 2 + .type mcl_fp_shr1_8L,@function +mcl_fp_shr1_8L: // @mcl_fp_shr1_8L +// BB#0: + ldp x8, x9, [x1] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x1, #16] + ldp x14, x15, [x1, #32] + extr x8, x9, x8, #1 + extr x9, x12, x9, #1 + extr x12, x13, x12, #1 + extr x13, x14, x13, #1 + extr x14, x15, x14, #1 + extr x15, x10, x15, #1 + extr x10, x11, x10, #1 + lsr x11, x11, #1 + stp x8, x9, [x0] + stp x12, x13, [x0, #16] + stp x14, x15, [x0, #32] + stp x10, x11, [x0, #48] + ret +.Lfunc_end119: + .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L + + .globl mcl_fp_add8L + .align 2 + .type mcl_fp_add8L,@function +mcl_fp_add8L: // @mcl_fp_add8L +// BB#0: + stp x22, x21, [sp, #-32]! + stp x20, x19, [sp, #16] + ldp x8, x9, [x2, #48] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x16, x17, [x2, #16] + ldp x18, x2, [x2] + ldp x4, x5, [x1] + ldp x6, x1, [x1, #16] + adds x18, x18, x4 + adcs x2, x2, x5 + ldp x4, x5, [x3, #48] + adcs x16, x16, x6 + adcs x17, x17, x1 + ldp x1, x6, [x3, #32] + adcs x7, x12, x14 + adcs x19, x13, x15 + ldp x12, x13, [x3] + stp x18, x2, [x0] + stp x16, x17, [x0, #16] + stp x7, x19, [x0, #32] + adcs x8, x8, x10 + adcs x20, x9, x11 + stp x8, x20, [x0, #48] + adcs x21, xzr, xzr + ldp x9, x10, [x3, #16] + subs x15, x18, x12 + sbcs x14, x2, x13 + sbcs x13, x16, x9 + sbcs x12, x17, x10 + sbcs x11, x7, x1 + sbcs x10, x19, x6 + sbcs x9, x8, x4 + sbcs x8, x20, x5 + sbcs x16, x21, xzr + and w16, w16, #0x1 + tbnz w16, #0, .LBB120_2 +// BB#1: // %nocarry + stp x15, x14, [x0] + stp x13, x12, [x0, #16] + stp x11, x10, [x0, #32] + stp x9, x8, [x0, #48] +.LBB120_2: // %carry + ldp x20, x19, [sp, #16] + ldp x22, x21, [sp], #32 + ret +.Lfunc_end120: + .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L + + .globl mcl_fp_addNF8L + .align 2 + .type mcl_fp_addNF8L,@function +mcl_fp_addNF8L: // @mcl_fp_addNF8L +// BB#0: + ldp x8, x9, [x1, #48] + ldp x10, x11, [x2, #48] + ldp x12, x13, [x1, #32] + ldp x14, x15, [x2, #32] + ldp x16, x17, [x1, #16] + ldp x18, x1, [x1] + ldp x4, x5, [x2] + ldp x6, x2, [x2, #16] + adds x18, x4, x18 + adcs x1, x5, x1 + ldp x4, x5, [x3, #48] + adcs x16, x6, x16 + adcs x17, x2, x17 + ldp x2, x6, [x3, #32] + adcs x12, x14, x12 + adcs x13, x15, x13 + ldp x14, x15, [x3] + adcs x8, x10, x8 + ldp x10, x3, [x3, #16] + adcs x9, x11, x9 + subs x11, x18, x14 + sbcs x14, x1, x15 + sbcs x10, x16, x10 + sbcs x15, x17, x3 + sbcs x2, x12, x2 + sbcs x3, x13, x6 + sbcs x4, x8, x4 + sbcs x5, x9, x5 + cmp x5, #0 // =0 + csel x11, x18, x11, lt + csel x14, x1, x14, lt + csel x10, x16, x10, lt + csel x15, x17, x15, lt + csel x12, x12, x2, lt + csel x13, x13, x3, lt + csel x8, x8, x4, lt + csel x9, x9, x5, lt + stp x11, x14, [x0] + stp x10, x15, [x0, #16] + stp x12, x13, [x0, #32] + stp x8, x9, [x0, #48] + ret +.Lfunc_end121: + .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L + + .globl mcl_fp_sub8L + .align 2 + .type mcl_fp_sub8L,@function +mcl_fp_sub8L: // @mcl_fp_sub8L +// BB#0: + ldp x14, x15, [x2, #48] + ldp x16, x17, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x18, x4, [x1, #32] + ldp x10, x11, [x2, #16] + ldp x8, x9, [x2] + ldp x2, x5, [x1] + ldp x6, x1, [x1, #16] + subs x8, x2, x8 + sbcs x9, x5, x9 + stp x8, x9, [x0] + sbcs x10, x6, x10 + sbcs x11, x1, x11 + stp x10, x11, [x0, #16] + sbcs x12, x18, x12 + sbcs x13, x4, x13 + stp x12, x13, [x0, #32] + sbcs x14, x16, x14 + sbcs x15, x17, x15 + stp x14, x15, [x0, #48] + ngcs x16, xzr + and w16, w16, #0x1 + tbnz w16, #0, .LBB122_2 +// BB#1: // %nocarry + 
ret +.LBB122_2: // %carry + ldp x16, x17, [x3, #48] + ldp x18, x1, [x3] + ldp x2, x4, [x3, #16] + ldp x5, x3, [x3, #32] + adds x8, x18, x8 + adcs x9, x1, x9 + adcs x10, x2, x10 + adcs x11, x4, x11 + adcs x12, x5, x12 + adcs x13, x3, x13 + adcs x14, x16, x14 + adcs x15, x17, x15 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + stp x14, x15, [x0, #48] + ret +.Lfunc_end122: + .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L + + .globl mcl_fp_subNF8L + .align 2 + .type mcl_fp_subNF8L,@function +mcl_fp_subNF8L: // @mcl_fp_subNF8L +// BB#0: + ldp x8, x9, [x2, #48] + ldp x10, x11, [x1, #48] + ldp x12, x13, [x2, #32] + ldp x14, x15, [x1, #32] + ldp x16, x17, [x2, #16] + ldp x18, x2, [x2] + ldp x4, x5, [x1] + ldp x6, x1, [x1, #16] + subs x18, x4, x18 + sbcs x2, x5, x2 + ldp x4, x5, [x3, #48] + sbcs x16, x6, x16 + sbcs x17, x1, x17 + ldp x1, x6, [x3, #32] + sbcs x12, x14, x12 + sbcs x13, x15, x13 + ldp x14, x15, [x3, #16] + sbcs x8, x10, x8 + ldp x10, x3, [x3] + sbcs x9, x11, x9 + asr x11, x9, #63 + and x10, x11, x10 + and x3, x11, x3 + and x14, x11, x14 + and x15, x11, x15 + and x1, x11, x1 + and x6, x11, x6 + and x4, x11, x4 + and x11, x11, x5 + adds x10, x10, x18 + str x10, [x0] + adcs x10, x3, x2 + str x10, [x0, #8] + adcs x10, x14, x16 + str x10, [x0, #16] + adcs x10, x15, x17 + str x10, [x0, #24] + adcs x10, x1, x12 + str x10, [x0, #32] + adcs x10, x6, x13 + adcs x8, x4, x8 + stp x10, x8, [x0, #40] + adcs x8, x11, x9 + str x8, [x0, #56] + ret +.Lfunc_end123: + .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L + + .globl mcl_fpDbl_add8L + .align 2 + .type mcl_fpDbl_add8L,@function +mcl_fpDbl_add8L: // @mcl_fpDbl_add8L +// BB#0: + ldp x8, x9, [x2, #112] + ldp x10, x11, [x1, #112] + ldp x12, x13, [x2, #96] + ldp x14, x15, [x1, #96] + ldp x16, x5, [x2] + ldp x17, x6, [x1] + ldp x18, x4, [x2, #80] + adds x16, x16, x17 + ldr x17, [x1, #16] + str x16, [x0] + adcs x16, x5, x6 + ldp x5, x6, [x2, #16] + str x16, [x0, #8] + adcs x17, x5, x17 + ldp x16, x5, [x1, #24] + str x17, [x0, #16] + adcs x16, x6, x16 + ldp x17, x6, [x2, #32] + str x16, [x0, #24] + adcs x17, x17, x5 + ldp x16, x5, [x1, #40] + str x17, [x0, #32] + adcs x16, x6, x16 + ldp x17, x6, [x2, #48] + str x16, [x0, #40] + ldr x16, [x1, #56] + adcs x17, x17, x5 + ldp x5, x2, [x2, #64] + str x17, [x0, #48] + adcs x16, x6, x16 + ldp x17, x6, [x1, #64] + str x16, [x0, #56] + ldp x16, x1, [x1, #80] + adcs x17, x5, x17 + adcs x2, x2, x6 + ldp x5, x6, [x3, #48] + adcs x16, x18, x16 + adcs x18, x4, x1 + ldp x1, x4, [x3, #32] + adcs x12, x12, x14 + adcs x13, x13, x15 + ldp x14, x15, [x3, #16] + adcs x8, x8, x10 + ldp x10, x3, [x3] + adcs x9, x9, x11 + adcs x11, xzr, xzr + subs x10, x17, x10 + sbcs x3, x2, x3 + sbcs x14, x16, x14 + sbcs x15, x18, x15 + sbcs x1, x12, x1 + sbcs x4, x13, x4 + sbcs x5, x8, x5 + sbcs x6, x9, x6 + sbcs x11, x11, xzr + tst x11, #0x1 + csel x10, x17, x10, ne + csel x11, x2, x3, ne + csel x14, x16, x14, ne + csel x15, x18, x15, ne + csel x12, x12, x1, ne + csel x13, x13, x4, ne + csel x8, x8, x5, ne + csel x9, x9, x6, ne + stp x10, x11, [x0, #64] + stp x14, x15, [x0, #80] + stp x12, x13, [x0, #96] + stp x8, x9, [x0, #112] + ret +.Lfunc_end124: + .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L + + .globl mcl_fpDbl_sub8L + .align 2 + .type mcl_fpDbl_sub8L,@function +mcl_fpDbl_sub8L: // @mcl_fpDbl_sub8L +// BB#0: + ldp x10, x8, [x2, #112] + ldp x11, x9, [x1, #112] + ldp x12, x13, [x2, #96] + ldp x14, x15, [x1, #96] + ldp x16, x5, [x1] + ldp x17, x4, [x2] + ldr x18, [x1, #80] + subs x16, x16, x17 + ldr 
x17, [x1, #16] + str x16, [x0] + sbcs x16, x5, x4 + ldp x4, x5, [x2, #16] + str x16, [x0, #8] + sbcs x17, x17, x4 + ldp x16, x4, [x1, #24] + str x17, [x0, #16] + sbcs x16, x16, x5 + ldp x17, x5, [x2, #32] + str x16, [x0, #24] + sbcs x17, x4, x17 + ldp x16, x4, [x1, #40] + str x17, [x0, #32] + sbcs x16, x16, x5 + ldp x17, x5, [x2, #48] + str x16, [x0, #40] + sbcs x17, x4, x17 + ldp x16, x4, [x1, #56] + str x17, [x0, #48] + sbcs x16, x16, x5 + ldp x17, x5, [x2, #64] + str x16, [x0, #56] + ldr x16, [x1, #72] + sbcs x17, x4, x17 + ldp x4, x2, [x2, #80] + ldr x1, [x1, #88] + sbcs x16, x16, x5 + sbcs x18, x18, x4 + ldp x4, x5, [x3, #48] + sbcs x1, x1, x2 + sbcs x12, x14, x12 + ldp x14, x2, [x3, #32] + sbcs x13, x15, x13 + sbcs x10, x11, x10 + ldp x11, x15, [x3, #16] + sbcs x8, x9, x8 + ngcs x9, xzr + tst x9, #0x1 + ldp x9, x3, [x3] + csel x5, x5, xzr, ne + csel x4, x4, xzr, ne + csel x2, x2, xzr, ne + csel x14, x14, xzr, ne + csel x15, x15, xzr, ne + csel x11, x11, xzr, ne + csel x3, x3, xzr, ne + csel x9, x9, xzr, ne + adds x9, x9, x17 + str x9, [x0, #64] + adcs x9, x3, x16 + str x9, [x0, #72] + adcs x9, x11, x18 + str x9, [x0, #80] + adcs x9, x15, x1 + str x9, [x0, #88] + adcs x9, x14, x12 + str x9, [x0, #96] + adcs x9, x2, x13 + str x9, [x0, #104] + adcs x9, x4, x10 + adcs x8, x5, x8 + stp x9, x8, [x0, #112] + ret +.Lfunc_end125: + .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L + + .align 2 + .type .LmulPv576x64,@function +.LmulPv576x64: // @mulPv576x64 +// BB#0: + ldr x9, [x0] + mul x10, x9, x1 + str x10, [x8] + ldr x10, [x0, #8] + umulh x9, x9, x1 + mul x11, x10, x1 + adds x9, x9, x11 + str x9, [x8, #8] + ldr x9, [x0, #16] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #16] + ldr x10, [x0, #24] + umulh x9, x9, x1 + mul x11, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #24] + ldr x9, [x0, #32] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #32] + ldr x10, [x0, #40] + umulh x9, x9, x1 + mul x11, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #40] + ldr x9, [x0, #48] + umulh x10, x10, x1 + mul x11, x9, x1 + adcs x10, x10, x11 + str x10, [x8, #48] + ldr x10, [x0, #56] + umulh x9, x9, x1 + mul x11, x10, x1 + adcs x9, x9, x11 + str x9, [x8, #56] + ldr x9, [x0, #64] + umulh x10, x10, x1 + mul x11, x9, x1 + umulh x9, x9, x1 + adcs x10, x10, x11 + adcs x9, x9, xzr + stp x10, x9, [x8, #64] + ret +.Lfunc_end126: + .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64 + + .globl mcl_fp_mulUnitPre9L + .align 2 + .type mcl_fp_mulUnitPre9L,@function +mcl_fp_mulUnitPre9L: // @mcl_fp_mulUnitPre9L +// BB#0: + stp x20, x19, [sp, #-32]! + stp x29, x30, [sp, #16] + add x29, sp, #16 // =16 + sub sp, sp, #80 // =80 + mov x19, x0 + mov x8, sp + mov x0, x1 + mov x1, x2 + bl .LmulPv576x64 + ldp x9, x8, [sp, #64] + ldp x11, x10, [sp, #48] + ldp x13, x12, [sp, #32] + ldp x14, x15, [sp] + ldp x16, x17, [sp, #16] + stp x14, x15, [x19] + stp x16, x17, [x19, #16] + stp x13, x12, [x19, #32] + stp x11, x10, [x19, #48] + stp x9, x8, [x19, #64] + sub sp, x29, #16 // =16 + ldp x29, x30, [sp, #16] + ldp x20, x19, [sp], #32 + ret +.Lfunc_end127: + .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L + + .globl mcl_fpDbl_mulPre9L + .align 2 + .type mcl_fpDbl_mulPre9L,@function +mcl_fpDbl_mulPre9L: // @mcl_fpDbl_mulPre9L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #752 // =752 + mov x21, x2 + ldr x9, [x21] + mov x20, x1 + mov x19, x0 + sub x8, x29, #160 // =160 + mov x0, x20 + mov x1, x9 + bl .LmulPv576x64 + ldur x8, [x29, #-88] + str x8, [sp, #24] // 8-byte Folded Spill + ldur x8, [x29, #-96] + str x8, [sp, #16] // 8-byte Folded Spill + ldp x25, x24, [x29, #-112] + ldp x27, x26, [x29, #-128] + ldp x22, x28, [x29, #-144] + ldp x8, x23, [x29, #-160] + ldr x1, [x21, #8] + str x8, [x19] + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [x29, #-176] + ldp x11, x10, [x29, #-192] + ldp x13, x12, [x29, #-208] + ldp x14, x16, [x29, #-240] + ldp x17, x15, [x29, #-224] + adds x14, x14, x23 + str x14, [x19, #8] + adcs x22, x16, x22 + adcs x23, x17, x28 + adcs x27, x15, x27 + adcs x26, x13, x26 + adcs x25, x12, x25 + adcs x24, x11, x24 + ldr x1, [x21, #16] + ldr x11, [sp, #16] // 8-byte Folded Reload + adcs x28, x10, x11 + ldr x10, [sp, #24] // 8-byte Folded Reload + adcs x9, x9, x10 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #512 // =512 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #584] + ldr x9, [sp, #576] + ldr x10, [sp, #568] + ldr x11, [sp, #560] + ldr x12, [sp, #552] + ldr x13, [sp, #544] + ldr x14, [sp, #512] + ldr x15, [sp, #536] + ldr x16, [sp, #520] + ldr x17, [sp, #528] + adds x14, x22, x14 + str x14, [x19, #16] + adcs x22, x23, x16 + adcs x23, x27, x17 + adcs x26, x26, x15 + adcs x25, x25, x13 + adcs x24, x24, x12 + adcs x27, x28, x11 + ldr x1, [x21, #24] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x28, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #432 // =432 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #496] + ldp x11, x10, [sp, #480] + ldp x13, x12, [sp, #464] + ldp x14, x16, [sp, #432] + ldp x17, x15, [sp, #448] + adds x14, x22, x14 + str x14, [x19, #24] + adcs x22, x23, x16 + adcs x23, x26, x17 + adcs x25, x25, x15 + adcs x24, x24, x13 + adcs x26, x27, x12 + adcs x27, x28, x11 + ldr x1, [x21, #32] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x28, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #352 // =352 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #416] + ldp x11, x10, [sp, #400] + ldp x13, x12, [sp, #384] + ldp x14, x16, [sp, #352] + ldp x17, x15, [sp, #368] + adds x14, x22, x14 + str x14, [x19, #32] + adcs x22, x23, x16 + adcs x23, x25, x17 + adcs x24, x24, x15 + adcs x25, x26, x13 + adcs x26, x27, x12 + adcs x27, x28, x11 + ldr x1, [x21, #40] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x28, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #272 // =272 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #336] + ldp x11, x10, [sp, #320] + ldp x13, x12, [sp, #304] + ldp x14, x16, [sp, #272] + ldp x17, x15, [sp, #288] + adds x14, x22, x14 + str x14, [x19, #40] + adcs x22, x23, x16 + adcs x23, x24, x17 + adcs x24, x25, x15 + adcs x25, x26, x13 + adcs x26, x27, x12 + adcs x27, x28, x11 + ldr x1, [x21, #48] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x28, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #192 // =192 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, 
#256] + ldp x11, x10, [sp, #240] + ldp x13, x12, [sp, #224] + ldp x14, x16, [sp, #192] + ldp x17, x15, [sp, #208] + adds x14, x22, x14 + str x14, [x19, #48] + adcs x22, x23, x16 + adcs x23, x24, x17 + adcs x24, x25, x15 + adcs x25, x26, x13 + adcs x26, x27, x12 + adcs x27, x28, x11 + ldr x1, [x21, #56] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x28, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x8, x9, [sp, #16] + add x8, sp, #112 // =112 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #176] + ldp x11, x10, [sp, #160] + ldp x13, x12, [sp, #144] + ldp x14, x16, [sp, #112] + ldp x17, x15, [sp, #128] + adds x14, x22, x14 + str x14, [x19, #56] + adcs x22, x23, x16 + adcs x23, x24, x17 + adcs x24, x25, x15 + adcs x25, x26, x13 + adcs x26, x27, x12 + adcs x27, x28, x11 + ldr x1, [x21, #64] + ldr x11, [sp, #24] // 8-byte Folded Reload + adcs x21, x11, x10 + ldr x10, [sp, #16] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #24] // 8-byte Folded Spill + add x8, sp, #32 // =32 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #96] + ldp x11, x10, [sp, #80] + ldp x13, x12, [sp, #64] + ldp x14, x16, [sp, #32] + ldp x17, x15, [sp, #48] + adds x14, x22, x14 + str x14, [x19, #64] + adcs x14, x23, x16 + str x14, [x19, #72] + adcs x14, x24, x17 + str x14, [x19, #80] + adcs x14, x25, x15 + adcs x13, x26, x13 + stp x14, x13, [x19, #88] + adcs x12, x27, x12 + adcs x11, x21, x11 + stp x12, x11, [x19, #104] + adcs x10, x28, x10 + str x10, [x19, #120] + ldr x10, [sp, #24] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x9, x8, [x19, #128] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end128: + .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L + + .globl mcl_fpDbl_sqrPre9L + .align 2 + .type mcl_fpDbl_sqrPre9L,@function +mcl_fpDbl_sqrPre9L: // @mcl_fpDbl_sqrPre9L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #736 // =736 + mov x20, x1 + ldr x1, [x20] + mov x19, x0 + sub x8, x29, #160 // =160 + mov x0, x20 + bl .LmulPv576x64 + ldur x8, [x29, #-88] + str x8, [sp, #8] // 8-byte Folded Spill + ldp x23, x22, [x29, #-104] + ldp x25, x24, [x29, #-120] + ldp x27, x26, [x29, #-136] + ldp x21, x28, [x29, #-152] + ldur x8, [x29, #-160] + ldr x1, [x20, #8] + str x8, [x19] + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [x29, #-176] + ldp x11, x10, [x29, #-192] + ldp x13, x12, [x29, #-208] + ldp x14, x16, [x29, #-240] + ldp x17, x15, [x29, #-224] + adds x14, x14, x21 + str x14, [x19, #8] + adcs x21, x16, x28 + adcs x27, x17, x27 + adcs x26, x15, x26 + adcs x25, x13, x25 + adcs x24, x12, x24 + adcs x23, x11, x23 + ldr x1, [x20, #16] + adcs x22, x10, x22 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x9, x10 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #496 // =496 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #568] + ldr x9, [sp, #560] + ldr x10, [sp, #552] + ldr x11, [sp, #544] + ldr x12, [sp, #536] + ldr x13, [sp, #528] + ldp x14, x16, [sp, #496] + ldr x15, [sp, #520] + ldr x17, [sp, #512] + adds x14, x21, x14 + str x14, [x19, #16] + adcs x21, x27, x16 + adcs x26, x26, x17 + adcs x25, x25, x15 + adcs x24, x24, x13 + adcs x23, x23, x12 + adcs x22, x22, x11 + ldr x1, [x20, #24] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #416 // =416 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #480] + ldp x11, x10, [sp, #464] + ldp x13, x12, [sp, #448] + ldp x14, x16, [sp, #416] + ldp x17, x15, [sp, #432] + adds x14, x21, x14 + str x14, [x19, #24] + adcs x21, x26, x16 + adcs x25, x25, x17 + adcs x24, x24, x15 + adcs x23, x23, x13 + adcs x22, x22, x12 + adcs x26, x27, x11 + ldr x1, [x20, #32] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #336 // =336 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #400] + ldp x11, x10, [sp, #384] + ldp x13, x12, [sp, #368] + ldp x14, x16, [sp, #336] + ldp x17, x15, [sp, #352] + adds x14, x21, x14 + str x14, [x19, #32] + adcs x21, x25, x16 + adcs x24, x24, x17 + adcs x23, x23, x15 + adcs x22, x22, x13 + adcs x25, x26, x12 + adcs x26, x27, x11 + ldr x1, [x20, #40] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #256 // =256 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #320] + ldp x11, x10, [sp, #304] + ldp x13, x12, [sp, #288] + ldp x14, x16, [sp, #256] + ldp x17, x15, [sp, #272] + adds x14, x21, x14 + str x14, [x19, #40] + adcs x21, x24, x16 + adcs x23, x23, x17 + adcs x22, x22, x15 + adcs x24, x25, x13 + adcs x25, x26, x12 + adcs x26, x27, x11 + ldr x1, [x20, #48] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #176 // =176 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #240] + ldp x11, x10, [sp, #224] + ldp x13, x12, [sp, #208] + ldp x14, x16, [sp, #176] + ldp x17, x15, [sp, #192] + adds x14, x21, x14 + str x14, [x19, #48] + adcs x21, x23, x16 + adcs x22, x22, x17 + adcs x23, x24, x15 + 
adcs x24, x25, x13 + adcs x25, x26, x12 + adcs x26, x27, x11 + ldr x1, [x20, #56] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #96 // =96 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #160] + ldp x11, x10, [sp, #144] + ldp x13, x12, [sp, #128] + ldp x14, x16, [sp, #96] + ldp x17, x15, [sp, #112] + adds x14, x21, x14 + str x14, [x19, #56] + adcs x21, x22, x16 + adcs x22, x23, x17 + adcs x23, x24, x15 + adcs x24, x25, x13 + adcs x25, x26, x12 + adcs x26, x27, x11 + ldr x1, [x20, #64] + adcs x27, x28, x10 + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x28, x10, x9 + adcs x8, x8, xzr + str x8, [sp, #8] // 8-byte Folded Spill + add x8, sp, #16 // =16 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #80] + ldp x11, x10, [sp, #64] + ldp x13, x12, [sp, #48] + ldp x14, x16, [sp, #16] + ldp x17, x15, [sp, #32] + adds x14, x21, x14 + str x14, [x19, #64] + adcs x14, x22, x16 + str x14, [x19, #72] + adcs x14, x23, x17 + str x14, [x19, #80] + adcs x14, x24, x15 + adcs x13, x25, x13 + stp x14, x13, [x19, #88] + adcs x12, x26, x12 + adcs x11, x27, x11 + stp x12, x11, [x19, #104] + adcs x10, x28, x10 + str x10, [x19, #120] + ldr x10, [sp, #8] // 8-byte Folded Reload + adcs x9, x10, x9 + adcs x8, x8, xzr + stp x9, x8, [x19, #128] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end129: + .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L + + .globl mcl_fp_mont9L + .align 2 + .type mcl_fp_mont9L,@function +mcl_fp_mont9L: // @mcl_fp_mont9L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #1600 // =1600 + mov x20, x3 + mov x28, x2 + str x28, [sp, #136] // 8-byte Folded Spill + ldur x19, [x20, #-8] + str x19, [sp, #144] // 8-byte Folded Spill + ldr x9, [x28] + mov x23, x1 + str x23, [sp, #152] // 8-byte Folded Spill + str x0, [sp, #128] // 8-byte Folded Spill + sub x8, x29, #160 // =160 + mov x0, x23 + mov x1, x9 + bl .LmulPv576x64 + ldur x24, [x29, #-160] + ldur x8, [x29, #-88] + str x8, [sp, #120] // 8-byte Folded Spill + ldur x8, [x29, #-96] + str x8, [sp, #112] // 8-byte Folded Spill + ldur x8, [x29, #-104] + str x8, [sp, #104] // 8-byte Folded Spill + ldur x8, [x29, #-112] + str x8, [sp, #96] // 8-byte Folded Spill + ldur x8, [x29, #-120] + str x8, [sp, #88] // 8-byte Folded Spill + ldur x8, [x29, #-128] + str x8, [sp, #80] // 8-byte Folded Spill + ldur x8, [x29, #-136] + str x8, [sp, #72] // 8-byte Folded Spill + ldur x8, [x29, #-144] + str x8, [sp, #64] // 8-byte Folded Spill + ldur x8, [x29, #-152] + str x8, [sp, #48] // 8-byte Folded Spill + mul x1, x24, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv576x64 + ldur x8, [x29, #-168] + str x8, [sp, #56] // 8-byte Folded Spill + ldur x8, [x29, #-176] + str x8, [sp, #40] // 8-byte Folded Spill + ldur x8, [x29, #-184] + str x8, [sp, #32] // 8-byte Folded Spill + ldur x8, [x29, #-192] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x21, x19, [x29, #-208] + ldp x26, x22, [x29, #-224] + ldp x27, x25, [x29, #-240] + ldr x1, [x28, #8] + add x8, sp, #1360 // =1360 + mov x0, x23 + bl .LmulPv576x64 + cmn x27, x24 + ldr x8, [sp, #1432] + ldr x9, [sp, #1424] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x25, x10 
+ ldr x11, [sp, #1416] + ldp x12, x14, [sp, #64] + adcs x12, x26, x12 + ldr x13, [sp, #1408] + adcs x14, x22, x14 + ldr x15, [sp, #1400] + ldp x16, x18, [sp, #80] + adcs x16, x21, x16 + ldr x17, [sp, #1392] + adcs x18, x19, x18 + ldr x0, [sp, #1384] + ldp x1, x3, [sp, #96] + ldp x2, x4, [sp, #24] + adcs x1, x2, x1 + ldr x2, [sp, #1376] + adcs x3, x4, x3 + ldr x4, [sp, #1360] + ldp x5, x7, [sp, #112] + ldr x6, [sp, #40] // 8-byte Folded Reload + adcs x5, x6, x5 + ldr x6, [sp, #1368] + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x7, x19, x7 + adcs x19, xzr, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + ldr x24, [sp, #144] // 8-byte Folded Reload + mul x1, x21, x24 + add x8, sp, #1280 // =1280 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1352] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #1344] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1336] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1328] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x26, [sp, #1320] + ldr x27, [sp, #1312] + ldr x28, [sp, #1304] + ldr x22, [sp, #1296] + ldr x19, [sp, #1288] + ldr x23, [sp, #1280] + ldr x25, [sp, #136] // 8-byte Folded Reload + ldr x1, [x25, #16] + add x8, sp, #1200 // =1200 + ldr x0, [sp, #152] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x21, x23 + ldr x8, [sp, #1272] + ldr x9, [sp, #1264] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldr x11, [sp, #1256] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + ldr x13, [sp, #1248] + adcs x14, x14, x28 + ldr x15, [sp, #1240] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + ldr x17, [sp, #1232] + adcs x18, x18, x26 + ldr x0, [sp, #1224] + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #1216] + adcs x3, x3, x4 + ldr x4, [sp, #1200] + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldr x6, [sp, #1208] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + mul x1, x21, x24 + add x8, sp, #1120 // =1120 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1192] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #1184] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1176] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1168] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x26, [sp, #1160] + ldr x27, [sp, #1152] + ldr x28, [sp, #1144] + ldr x22, [sp, #1136] + ldr x19, [sp, #1128] + ldr x23, [sp, #1120] + ldr x1, [x25, #24] + add x8, sp, #1040 // =1040 + ldr x24, [sp, #152] // 
8-byte Folded Reload + mov x0, x24 + bl .LmulPv576x64 + cmn x21, x23 + ldr x8, [sp, #1112] + ldr x9, [sp, #1104] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldr x11, [sp, #1096] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + ldr x13, [sp, #1088] + adcs x14, x14, x28 + ldr x15, [sp, #1080] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + ldr x17, [sp, #1072] + adcs x18, x18, x26 + ldr x0, [sp, #1064] + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #1056] + adcs x3, x3, x4 + ldr x4, [sp, #1040] + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldr x6, [sp, #1048] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + ldr x8, [sp, #144] // 8-byte Folded Reload + mul x1, x21, x8 + add x8, sp, #960 // =960 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1032] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #1024] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1016] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1008] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x26, [sp, #1000] + ldr x27, [sp, #992] + ldr x28, [sp, #984] + ldr x22, [sp, #976] + ldr x19, [sp, #968] + ldr x23, [sp, #960] + ldr x1, [x25, #32] + add x8, sp, #880 // =880 + mov x0, x24 + bl .LmulPv576x64 + cmn x21, x23 + ldr x8, [sp, #952] + ldr x9, [sp, #944] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldr x11, [sp, #936] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + ldr x13, [sp, #928] + adcs x14, x14, x28 + ldr x15, [sp, #920] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + ldr x17, [sp, #912] + adcs x18, x18, x26 + ldr x0, [sp, #904] + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #896] + adcs x3, x3, x4 + ldr x4, [sp, #880] + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldr x6, [sp, #888] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + ldr x25, [sp, #144] // 8-byte Folded Reload + mul x1, x21, x25 + add x8, sp, #800 // =800 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #872] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #864] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #856] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #848] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x26, [sp, #840] + ldr x27, [sp, #832] + ldr x28, [sp, #824] + ldr x22, [sp, #816] + ldr x19, [sp, #808] 
+ ldr x23, [sp, #800] + ldr x24, [sp, #136] // 8-byte Folded Reload + ldr x1, [x24, #40] + add x8, sp, #720 // =720 + ldr x0, [sp, #152] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x21, x23 + ldr x8, [sp, #792] + ldr x9, [sp, #784] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldr x11, [sp, #776] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + ldr x13, [sp, #768] + adcs x14, x14, x28 + ldr x15, [sp, #760] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + ldr x17, [sp, #752] + adcs x18, x18, x26 + ldr x0, [sp, #744] + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #736] + adcs x3, x3, x4 + ldr x4, [sp, #720] + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldr x6, [sp, #728] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + mul x1, x21, x25 + add x8, sp, #640 // =640 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #712] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #704] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #696] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #688] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x26, [sp, #680] + ldr x27, [sp, #672] + ldr x28, [sp, #664] + ldr x22, [sp, #656] + ldr x19, [sp, #648] + ldr x23, [sp, #640] + ldr x1, [x24, #48] + add x8, sp, #560 // =560 + ldr x25, [sp, #152] // 8-byte Folded Reload + mov x0, x25 + bl .LmulPv576x64 + cmn x21, x23 + ldr x8, [sp, #632] + ldr x9, [sp, #624] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldr x11, [sp, #616] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + ldr x13, [sp, #608] + adcs x14, x14, x28 + ldr x15, [sp, #600] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + ldr x17, [sp, #592] + adcs x18, x18, x26 + ldr x0, [sp, #584] + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldr x2, [sp, #576] + adcs x3, x3, x4 + ldr x4, [sp, #560] + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldr x6, [sp, #568] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + ldr x24, [sp, #144] // 8-byte Folded Reload + mul x1, x21, x24 + add x8, sp, #480 // =480 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #552] + str x8, [sp, #40] // 8-byte Folded Spill + ldr x8, [sp, #544] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #536] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #528] + str x8, [sp, #16] // 8-byte Folded 
Spill + ldr x26, [sp, #520] + ldr x27, [sp, #512] + ldp x22, x28, [sp, #496] + ldp x23, x19, [sp, #480] + ldr x8, [sp, #136] // 8-byte Folded Reload + ldr x1, [x8, #56] + add x8, sp, #400 // =400 + mov x0, x25 + bl .LmulPv576x64 + cmn x21, x23 + ldp x9, x8, [sp, #464] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldp x13, x11, [sp, #448] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + adcs x14, x14, x28 + ldp x17, x15, [sp, #432] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + adcs x18, x18, x26 + ldp x3, x1, [sp, #64] + ldp x2, x4, [sp, #16] + adcs x1, x1, x2 + ldp x2, x0, [sp, #416] + adcs x3, x3, x4 + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldp x4, x6, [sp, #400] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x10, x12, x6 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x14, x2 + str x10, [sp, #104] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #96] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #88] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + adcs x8, x19, x8 + stp x8, x9, [sp, #112] + adcs x8, xzr, xzr + stp x8, x10, [sp, #56] + mul x1, x21, x24 + add x8, sp, #320 // =320 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #392] + str x8, [sp, #40] // 8-byte Folded Spill + ldp x24, x8, [sp, #376] + str x8, [sp, #32] // 8-byte Folded Spill + ldp x26, x25, [sp, #360] + ldp x28, x27, [sp, #344] + ldp x19, x22, [sp, #328] + ldr x23, [sp, #320] + ldr x8, [sp, #136] // 8-byte Folded Reload + ldr x1, [x8, #64] + add x8, sp, #240 // =240 + ldr x0, [sp, #152] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x21, x23 + ldp x9, x8, [sp, #304] + ldr x10, [sp, #48] // 8-byte Folded Reload + adcs x10, x10, x19 + ldp x13, x11, [sp, #288] + ldp x14, x12, [sp, #96] + adcs x12, x12, x22 + adcs x14, x14, x28 + ldp x17, x15, [sp, #272] + ldp x18, x16, [sp, #80] + adcs x16, x16, x27 + adcs x18, x18, x26 + ldp x2, x0, [sp, #256] + ldp x3, x1, [sp, #64] + adcs x1, x1, x25 + adcs x3, x3, x24 + ldp x7, x5, [sp, #112] + ldp x6, x19, [sp, #32] + adcs x5, x5, x6 + ldp x4, x6, [sp, #240] + adcs x7, x7, x19 + ldr x19, [sp, #56] // 8-byte Folded Reload + adcs x19, x19, xzr + adds x21, x10, x4 + adcs x22, x12, x6 + adcs x23, x14, x2 + adcs x24, x16, x0 + adcs x25, x18, x17 + adcs x26, x1, x15 + adcs x27, x3, x13 + adcs x10, x5, x11 + str x10, [sp, #152] // 8-byte Folded Spill + adcs x9, x7, x9 + str x9, [sp, #136] // 8-byte Folded Spill + adcs x19, x19, x8 + adcs x28, xzr, xzr + ldr x8, [sp, #144] // 8-byte Folded Reload + mul x1, x21, x8 + add x8, sp, #160 // =160 + mov x0, x20 + bl .LmulPv576x64 + ldp x16, x8, [sp, #224] + ldp x9, x10, [sp, #160] + ldp x11, x12, [sp, #176] + cmn x21, x9 + ldp x13, x9, [sp, #192] + adcs x10, x22, x10 + ldp x14, x15, [sp, #208] + adcs x11, x23, x11 + adcs x12, x24, x12 + adcs x13, x25, x13 + adcs x9, x26, x9 + adcs x14, x27, x14 + ldp x0, x17, [x20, #56] + ldp x2, x18, [x20, #40] + ldp x4, x1, [x20, #24] + ldp x6, x3, [x20, #8] + ldr x5, [x20] + ldr x7, [sp, #152] // 8-byte Folded Reload + adcs x15, x7, x15 + ldr x7, [sp, #136] // 8-byte Folded Reload + adcs x16, x7, x16 + adcs x8, x19, x8 + adcs x7, x28, xzr + subs x5, x10, x5 + sbcs x6, x11, x6 + sbcs x3, x12, x3 + sbcs x4, x13, x4 + sbcs x1, x9, x1 + sbcs x2, x14, x2 + sbcs x18, x15, x18 + sbcs x0, x16, x0 + sbcs x17, x8, x17 + sbcs x7, x7, 
xzr + tst x7, #0x1 + csel x10, x10, x5, ne + csel x11, x11, x6, ne + csel x12, x12, x3, ne + csel x13, x13, x4, ne + csel x9, x9, x1, ne + csel x14, x14, x2, ne + csel x15, x15, x18, ne + csel x16, x16, x0, ne + csel x8, x8, x17, ne + ldr x17, [sp, #128] // 8-byte Folded Reload + stp x10, x11, [x17] + stp x12, x13, [x17, #16] + stp x9, x14, [x17, #32] + stp x15, x16, [x17, #48] + str x8, [x17, #64] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end130: + .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L + + .globl mcl_fp_montNF9L + .align 2 + .type mcl_fp_montNF9L,@function +mcl_fp_montNF9L: // @mcl_fp_montNF9L +// BB#0: + stp x28, x27, [sp, #-96]! + stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #1584 // =1584 + mov x20, x3 + mov x28, x2 + str x28, [sp, #120] // 8-byte Folded Spill + ldur x19, [x20, #-8] + str x19, [sp, #128] // 8-byte Folded Spill + ldr x9, [x28] + mov x23, x1 + str x23, [sp, #136] // 8-byte Folded Spill + str x0, [sp, #112] // 8-byte Folded Spill + sub x8, x29, #160 // =160 + mov x0, x23 + mov x1, x9 + bl .LmulPv576x64 + ldur x24, [x29, #-160] + ldur x8, [x29, #-88] + str x8, [sp, #104] // 8-byte Folded Spill + ldur x8, [x29, #-96] + str x8, [sp, #96] // 8-byte Folded Spill + ldur x8, [x29, #-104] + str x8, [sp, #88] // 8-byte Folded Spill + ldur x8, [x29, #-112] + str x8, [sp, #80] // 8-byte Folded Spill + ldur x8, [x29, #-120] + str x8, [sp, #72] // 8-byte Folded Spill + ldur x8, [x29, #-128] + str x8, [sp, #64] // 8-byte Folded Spill + ldur x8, [x29, #-136] + str x8, [sp, #56] // 8-byte Folded Spill + ldur x8, [x29, #-144] + str x8, [sp, #48] // 8-byte Folded Spill + ldur x8, [x29, #-152] + str x8, [sp, #32] // 8-byte Folded Spill + mul x1, x24, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv576x64 + ldur x8, [x29, #-168] + str x8, [sp, #40] // 8-byte Folded Spill + ldur x8, [x29, #-176] + str x8, [sp, #24] // 8-byte Folded Spill + ldur x8, [x29, #-184] + str x8, [sp, #16] // 8-byte Folded Spill + ldur x8, [x29, #-192] + str x8, [sp, #8] // 8-byte Folded Spill + ldp x21, x19, [x29, #-208] + ldp x26, x22, [x29, #-224] + ldp x27, x25, [x29, #-240] + ldr x1, [x28, #8] + add x8, sp, #1344 // =1344 + mov x0, x23 + bl .LmulPv576x64 + cmn x27, x24 + ldr x8, [sp, #1416] + ldr x9, [sp, #1408] + ldr x10, [sp, #32] // 8-byte Folded Reload + adcs x10, x25, x10 + ldr x11, [sp, #1400] + ldp x12, x14, [sp, #48] + adcs x12, x26, x12 + ldr x13, [sp, #1392] + adcs x14, x22, x14 + ldr x15, [sp, #1384] + ldp x16, x18, [sp, #64] + adcs x16, x21, x16 + ldr x17, [sp, #1376] + adcs x18, x19, x18 + ldr x0, [sp, #1368] + ldp x1, x3, [sp, #80] + ldp x2, x4, [sp, #8] + adcs x1, x2, x1 + ldr x2, [sp, #1352] + adcs x3, x4, x3 + ldr x4, [sp, #1344] + ldp x5, x7, [sp, #96] + ldr x6, [sp, #24] // 8-byte Folded Reload + adcs x5, x6, x5 + ldr x6, [sp, #1360] + ldr x19, [sp, #40] // 8-byte Folded Reload + adcs x7, x19, x7 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs 
x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x22, [sp, #128] // 8-byte Folded Reload + mul x1, x19, x22 + add x8, sp, #1264 // =1264 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1336] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1328] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1320] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #1312] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x24, [sp, #1304] + ldr x25, [sp, #1296] + ldr x26, [sp, #1288] + ldr x21, [sp, #1280] + ldr x27, [sp, #1272] + ldr x28, [sp, #1264] + ldr x23, [sp, #120] // 8-byte Folded Reload + ldr x1, [x23, #16] + add x8, sp, #1184 // =1184 + ldr x0, [sp, #136] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x19, x28 + ldr x8, [sp, #1256] + ldr x9, [sp, #1248] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldr x11, [sp, #1240] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + ldr x13, [sp, #1232] + adcs x14, x14, x26 + ldr x15, [sp, #1224] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + ldr x17, [sp, #1216] + adcs x18, x18, x24 + ldr x0, [sp, #1208] + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldr x2, [sp, #1192] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #1184] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldr x6, [sp, #1200] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x22 + add x8, sp, #1104 // =1104 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1176] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1168] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1160] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #1152] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x24, [sp, #1144] + ldr x25, [sp, #1136] + ldr x26, [sp, #1128] + ldr x21, [sp, #1120] + ldr x27, [sp, #1112] + ldr x28, [sp, #1104] + ldr x1, [x23, #24] + add x8, sp, #1024 // =1024 + ldr x22, [sp, #136] // 8-byte Folded Reload + mov x0, x22 + bl .LmulPv576x64 + cmn x19, x28 + ldr x8, [sp, #1096] + ldr x9, [sp, #1088] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldr x11, [sp, #1080] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + ldr x13, [sp, #1072] + adcs x14, x14, x26 + ldr x15, [sp, #1064] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + ldr x17, [sp, #1056] + adcs x18, x18, x24 + ldr x0, [sp, #1048] + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldr x2, [sp, #1032] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #1024] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldr x6, [sp, #1040] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte 
Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #128] // 8-byte Folded Reload + mul x1, x19, x8 + add x8, sp, #944 // =944 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #1016] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #1008] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #1000] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #992] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x24, [sp, #984] + ldr x25, [sp, #976] + ldr x26, [sp, #968] + ldr x21, [sp, #960] + ldr x27, [sp, #952] + ldr x28, [sp, #944] + ldr x1, [x23, #32] + add x8, sp, #864 // =864 + mov x0, x22 + bl .LmulPv576x64 + cmn x19, x28 + ldr x8, [sp, #936] + ldr x9, [sp, #928] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldr x11, [sp, #920] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + ldr x13, [sp, #912] + adcs x14, x14, x26 + ldr x15, [sp, #904] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + ldr x17, [sp, #896] + adcs x18, x18, x24 + ldr x0, [sp, #888] + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldr x2, [sp, #872] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #864] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldr x6, [sp, #880] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x23, [sp, #128] // 8-byte Folded Reload + mul x1, x19, x23 + add x8, sp, #784 // =784 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #856] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #848] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #840] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #832] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x24, [sp, #824] + ldr x25, [sp, #816] + ldr x26, [sp, #808] + ldr x21, [sp, #800] + ldr x27, [sp, #792] + ldr x28, [sp, #784] + ldr x22, [sp, #120] // 8-byte Folded Reload + ldr x1, [x22, #40] + add x8, sp, #704 // =704 + ldr x0, [sp, #136] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x19, x28 + ldr x8, [sp, #776] + ldr x9, [sp, #768] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldr x11, [sp, #760] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + ldr x13, [sp, #752] + adcs x14, x14, x26 + ldr x15, [sp, #744] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + ldr x17, [sp, #736] + adcs x18, x18, x24 + ldr x0, [sp, #728] + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldr x2, [sp, #712] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #704] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldr x6, [sp, #720] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x5, 
x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x23 + add x8, sp, #624 // =624 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #696] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #688] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #680] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #672] + str x8, [sp, #8] // 8-byte Folded Spill + ldr x24, [sp, #664] + ldr x25, [sp, #656] + ldr x26, [sp, #648] + ldr x21, [sp, #640] + ldr x27, [sp, #632] + ldr x28, [sp, #624] + ldr x1, [x22, #48] + add x8, sp, #544 // =544 + ldr x23, [sp, #136] // 8-byte Folded Reload + mov x0, x23 + bl .LmulPv576x64 + cmn x19, x28 + ldr x8, [sp, #616] + ldr x9, [sp, #608] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldr x11, [sp, #600] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + ldr x13, [sp, #592] + adcs x14, x14, x26 + ldr x15, [sp, #584] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + ldr x17, [sp, #576] + adcs x18, x18, x24 + ldr x0, [sp, #568] + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldr x2, [sp, #552] + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldr x4, [sp, #544] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldr x6, [sp, #560] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x22, [sp, #128] // 8-byte Folded Reload + mul x1, x19, x22 + add x8, sp, #464 // =464 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #536] + str x8, [sp, #32] // 8-byte Folded Spill + ldr x8, [sp, #528] + str x8, [sp, #24] // 8-byte Folded Spill + ldr x8, [sp, #520] + str x8, [sp, #16] // 8-byte Folded Spill + ldr x8, [sp, #512] + str x8, [sp, #8] // 8-byte Folded Spill + ldp x25, x24, [sp, #496] + ldp x21, x26, [sp, #480] + ldp x28, x27, [sp, #464] + ldr x8, [sp, #120] // 8-byte Folded Reload + ldr x1, [x8, #56] + add x8, sp, #384 // =384 + mov x0, x23 + bl .LmulPv576x64 + cmn x19, x28 + ldp x9, x8, [sp, #448] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldp x13, x11, [sp, #432] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + adcs x14, x14, x26 + ldp x17, x15, [sp, #416] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + adcs x18, x18, x24 + ldp x2, x4, [sp, #8] + adcs x1, x1, x2 + ldp x5, x3, [sp, #96] + adcs x3, x3, x4 + ldp x4, x2, [sp, #384] + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldp x6, x0, [sp, #400] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x10, x12, x2 + str x10, [sp, #40] // 8-byte Folded Spill + adcs x10, x14, x6 + str x10, [sp, #80] // 8-byte Folded Spill + adcs x10, x16, x0 + str x10, [sp, #72] // 8-byte Folded Spill + adcs x10, x18, x17 + str x10, [sp, #64] // 8-byte Folded Spill + adcs x10, x1, x15 + str x10, [sp, #56] // 8-byte Folded Spill + adcs x10, x3, x13 + str x10, [sp, #48] // 8-byte Folded Spill + adcs x10, x5, x11 + adcs x9, x7, x9 + stp x9, x10, [sp, #96] + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + mul x1, x19, x22 + add x8, sp, #304 // =304 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, 
[sp, #376] + str x8, [sp, #32] // 8-byte Folded Spill + ldp x22, x8, [sp, #360] + str x8, [sp, #24] // 8-byte Folded Spill + ldp x24, x23, [sp, #344] + ldp x26, x25, [sp, #328] + ldp x27, x21, [sp, #312] + ldr x28, [sp, #304] + ldr x8, [sp, #120] // 8-byte Folded Reload + ldr x1, [x8, #64] + add x8, sp, #224 // =224 + ldr x0, [sp, #136] // 8-byte Folded Reload + bl .LmulPv576x64 + cmn x19, x28 + ldp x9, x8, [sp, #288] + ldp x10, x1, [sp, #40] + adcs x10, x10, x27 + ldp x13, x11, [sp, #272] + ldp x14, x12, [sp, #72] + adcs x12, x12, x21 + adcs x14, x14, x26 + ldp x17, x15, [sp, #256] + ldp x18, x16, [sp, #56] + adcs x16, x16, x25 + adcs x18, x18, x24 + adcs x1, x1, x23 + ldp x4, x2, [sp, #224] + ldp x5, x3, [sp, #96] + adcs x3, x3, x22 + ldp x6, x19, [sp, #24] + adcs x5, x5, x6 + ldp x6, x0, [sp, #240] + ldr x7, [sp, #88] // 8-byte Folded Reload + adcs x7, x7, x19 + adds x19, x10, x4 + adcs x21, x12, x2 + adcs x22, x14, x6 + adcs x23, x16, x0 + adcs x24, x18, x17 + adcs x25, x1, x15 + adcs x26, x3, x13 + adcs x10, x5, x11 + str x10, [sp, #136] // 8-byte Folded Spill + adcs x28, x7, x9 + adcs x27, x8, xzr + ldr x8, [sp, #128] // 8-byte Folded Reload + mul x1, x19, x8 + add x8, sp, #144 // =144 + mov x0, x20 + bl .LmulPv576x64 + ldp x16, x8, [sp, #208] + ldp x9, x10, [sp, #144] + ldp x11, x12, [sp, #160] + cmn x19, x9 + ldp x13, x9, [sp, #176] + adcs x10, x21, x10 + ldp x14, x15, [sp, #192] + adcs x11, x22, x11 + adcs x12, x23, x12 + adcs x13, x24, x13 + adcs x9, x25, x9 + adcs x14, x26, x14 + ldp x0, x17, [x20, #56] + ldp x2, x18, [x20, #40] + ldp x4, x1, [x20, #24] + ldp x6, x3, [x20, #8] + ldr x5, [x20] + ldr x7, [sp, #136] // 8-byte Folded Reload + adcs x15, x7, x15 + adcs x16, x28, x16 + adcs x8, x27, x8 + subs x5, x10, x5 + sbcs x6, x11, x6 + sbcs x3, x12, x3 + sbcs x4, x13, x4 + sbcs x1, x9, x1 + sbcs x2, x14, x2 + sbcs x18, x15, x18 + sbcs x0, x16, x0 + sbcs x17, x8, x17 + asr x7, x17, #63 + cmp x7, #0 // =0 + csel x10, x10, x5, lt + csel x11, x11, x6, lt + csel x12, x12, x3, lt + csel x13, x13, x4, lt + csel x9, x9, x1, lt + csel x14, x14, x2, lt + csel x15, x15, x18, lt + csel x16, x16, x0, lt + csel x8, x8, x17, lt + ldr x17, [sp, #112] // 8-byte Folded Reload + stp x10, x11, [x17] + stp x12, x13, [x17, #16] + stp x9, x14, [x17, #32] + stp x15, x16, [x17, #48] + str x8, [x17, #64] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end131: + .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L + + .globl mcl_fp_montRed9L + .align 2 + .type mcl_fp_montRed9L,@function +mcl_fp_montRed9L: // @mcl_fp_montRed9L +// BB#0: + stp x28, x27, [sp, #-96]! 
+ stp x26, x25, [sp, #16] + stp x24, x23, [sp, #32] + stp x22, x21, [sp, #48] + stp x20, x19, [sp, #64] + stp x29, x30, [sp, #80] + add x29, sp, #80 // =80 + sub sp, sp, #912 // =912 + mov x20, x2 + ldur x9, [x20, #-8] + str x9, [sp, #40] // 8-byte Folded Spill + ldr x8, [x20, #64] + str x8, [sp, #184] // 8-byte Folded Spill + ldr x8, [x20, #48] + str x8, [sp, #168] // 8-byte Folded Spill + ldr x8, [x20, #56] + str x8, [sp, #176] // 8-byte Folded Spill + ldr x8, [x20, #32] + str x8, [sp, #144] // 8-byte Folded Spill + ldr x8, [x20, #40] + str x8, [sp, #152] // 8-byte Folded Spill + ldr x8, [x20, #16] + str x8, [sp, #128] // 8-byte Folded Spill + ldr x8, [x20, #24] + str x8, [sp, #136] // 8-byte Folded Spill + ldr x8, [x20] + str x8, [sp, #112] // 8-byte Folded Spill + ldr x8, [x20, #8] + str x8, [sp, #120] // 8-byte Folded Spill + ldr x8, [x1, #128] + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [x1, #136] + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [x1, #112] + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [x1, #120] + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [x1, #96] + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [x1, #104] + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [x1, #80] + str x8, [sp, #48] // 8-byte Folded Spill + ldr x8, [x1, #88] + str x8, [sp, #56] // 8-byte Folded Spill + ldp x23, x8, [x1, #64] + str x8, [sp, #16] // 8-byte Folded Spill + ldp x25, x19, [x1, #48] + ldp x28, x27, [x1, #32] + ldp x22, x24, [x1, #16] + ldp x21, x26, [x1] + str x0, [sp, #160] // 8-byte Folded Spill + mul x1, x21, x9 + sub x8, x29, #160 // =160 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [x29, #-96] + ldp x11, x10, [x29, #-112] + ldp x13, x12, [x29, #-128] + ldp x14, x15, [x29, #-160] + ldp x16, x17, [x29, #-144] + cmn x21, x14 + adcs x21, x26, x15 + adcs x14, x22, x16 + adcs x24, x24, x17 + adcs x26, x28, x13 + adcs x27, x27, x12 + adcs x25, x25, x11 + adcs x10, x19, x10 + stp x10, x14, [sp, #24] + adcs x23, x23, x9 + ldr x9, [sp, #16] // 8-byte Folded Reload + adcs x28, x9, x8 + ldr x8, [sp, #48] // 8-byte Folded Reload + adcs x22, x8, xzr + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #56] // 8-byte Folded Spill + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + adcs x8, xzr, xzr + str x8, [sp, #48] // 8-byte Folded Spill + ldr x19, [sp, #40] // 8-byte Folded Reload + mul x1, x21, x19 + sub x8, x29, #240 // =240 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [x29, #-176] + ldp x11, x10, [x29, #-192] + ldp x13, x12, [x29, #-208] + ldp x14, x15, [x29, #-240] + ldp x16, x17, [x29, #-224] + cmn x21, x14 + ldr x14, [sp, #32] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x24, x16 + adcs x26, x26, x17 + adcs x27, x27, x13 + adcs x25, x25, x12 + ldr x12, [sp, #24] // 8-byte Folded Reload + adcs x11, x12, x11 + stp x11, x14, [sp, #24] + adcs x23, x23, x10 + adcs x28, x28, x9 + adcs x22, x22, x8 + ldr x8, [sp, #56] // 8-byte Folded Reload 
+ adcs x24, x8, xzr + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #48] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #56] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #672 // =672 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #744] + ldr x9, [sp, #736] + ldr x10, [sp, #728] + ldr x11, [sp, #720] + ldr x12, [sp, #712] + ldr x13, [sp, #704] + ldr x14, [sp, #672] + ldr x15, [sp, #680] + ldr x16, [sp, #688] + ldr x17, [sp, #696] + cmn x21, x14 + ldr x14, [sp, #32] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x26, x16 + str x14, [sp, #48] // 8-byte Folded Spill + adcs x27, x27, x17 + adcs x25, x25, x13 + ldr x13, [sp, #24] // 8-byte Folded Reload + adcs x12, x13, x12 + str x12, [sp, #32] // 8-byte Folded Spill + adcs x23, x23, x11 + adcs x28, x28, x10 + adcs x22, x22, x9 + adcs x24, x24, x8 + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x26, x8, xzr + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #56] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #64] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #592 // =592 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #664] + ldr x9, [sp, #656] + ldr x10, [sp, #648] + ldr x11, [sp, #640] + ldr x12, [sp, #632] + ldr x13, [sp, #624] + ldr x14, [sp, #592] + ldr x15, [sp, #600] + ldr x16, [sp, #608] + ldr x17, [sp, #616] + cmn x21, x14 + ldr x14, [sp, #48] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x27, x16 + str x14, [sp, #56] // 8-byte Folded Spill + adcs x25, x25, x17 + ldr x14, [sp, #32] // 8-byte Folded Reload + adcs x13, x14, x13 + str x13, [sp, #48] // 8-byte Folded Spill + adcs x23, x23, x12 + adcs x28, x28, x11 + adcs x22, x22, x10 + adcs x24, x24, x9 + adcs x26, x26, x8 + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x27, x8, xzr + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #64] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #72] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #512 // =512 + mov x0, x20 + bl .LmulPv576x64 + ldr x8, [sp, #584] + ldr x9, [sp, #576] + ldr x10, [sp, 
#568] + ldr x11, [sp, #560] + ldr x12, [sp, #552] + ldr x13, [sp, #544] + ldr x14, [sp, #512] + ldr x15, [sp, #520] + ldr x16, [sp, #528] + ldr x17, [sp, #536] + cmn x21, x14 + ldr x14, [sp, #56] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x25, x16 + str x14, [sp, #64] // 8-byte Folded Spill + ldr x14, [sp, #48] // 8-byte Folded Reload + adcs x14, x14, x17 + str x14, [sp, #56] // 8-byte Folded Spill + adcs x23, x23, x13 + adcs x28, x28, x12 + adcs x22, x22, x11 + adcs x24, x24, x10 + adcs x26, x26, x9 + adcs x27, x27, x8 + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x25, x8, xzr + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #72] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #80] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #432 // =432 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #496] + ldp x11, x10, [sp, #480] + ldp x13, x12, [sp, #464] + ldp x14, x15, [sp, #432] + ldp x16, x17, [sp, #448] + cmn x21, x14 + ldr x14, [sp, #64] // 8-byte Folded Reload + adcs x21, x14, x15 + ldr x14, [sp, #56] // 8-byte Folded Reload + adcs x14, x14, x16 + adcs x23, x23, x17 + adcs x28, x28, x13 + adcs x22, x22, x12 + adcs x24, x24, x11 + adcs x26, x26, x10 + adcs x27, x27, x9 + adcs x25, x25, x8 + ldr x8, [sp, #88] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + stp x14, x8, [sp, #72] + mul x1, x21, x19 + add x8, sp, #352 // =352 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #416] + ldp x11, x10, [sp, #400] + ldp x13, x12, [sp, #384] + ldp x14, x15, [sp, #352] + ldp x16, x17, [sp, #368] + cmn x21, x14 + ldr x14, [sp, #72] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x23, x16 + str x14, [sp, #72] // 8-byte Folded Spill + adcs x28, x28, x17 + adcs x22, x22, x13 + adcs x24, x24, x12 + adcs x26, x26, x11 + adcs x27, x27, x10 + adcs x25, x25, x9 + ldr x9, [sp, #88] // 8-byte Folded Reload + adcs x8, x9, x8 + str x8, [sp, #88] // 8-byte Folded Spill + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x23, x8, xzr + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + ldr x8, [sp, #80] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #96] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #272 // =272 + mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #336] + ldp x11, x10, [sp, #320] + ldp x13, x12, [sp, #304] + ldp x14, x15, [sp, #272] + ldp x16, x17, [sp, #288] + cmn x21, x14 + ldr x14, [sp, #72] // 8-byte Folded Reload + adcs x21, x14, x15 + adcs x14, x28, x16 + adcs x22, x22, x17 + adcs x24, x24, x13 + adcs x26, x26, x12 + adcs x27, x27, x11 + adcs x25, x25, x10 + ldr x10, [sp, #88] // 8-byte Folded Reload + adcs x9, x10, x9 + stp x14, x9, [sp, #80] + adcs x23, x23, x8 + ldr x8, [sp, #104] // 8-byte Folded Reload + adcs x28, x8, xzr + ldr x8, [sp, #96] // 8-byte Folded Reload + adcs x8, x8, xzr + str x8, [sp, #104] // 8-byte Folded Spill + mul x1, x21, x19 + add x8, sp, #192 // =192 
+ mov x0, x20 + bl .LmulPv576x64 + ldp x9, x8, [sp, #256] + ldp x11, x10, [sp, #240] + ldp x13, x12, [sp, #224] + ldp x14, x15, [sp, #192] + ldp x16, x17, [sp, #208] + cmn x21, x14 + ldr x14, [sp, #80] // 8-byte Folded Reload + adcs x14, x14, x15 + adcs x15, x22, x16 + adcs x16, x24, x17 + adcs x13, x26, x13 + adcs x12, x27, x12 + adcs x11, x25, x11 + ldr x17, [sp, #88] // 8-byte Folded Reload + adcs x10, x17, x10 + adcs x9, x23, x9 + adcs x8, x28, x8 + ldp x17, x18, [sp, #104] + adcs x17, x17, xzr + subs x18, x14, x18 + ldp x0, x1, [sp, #120] + sbcs x0, x15, x0 + sbcs x1, x16, x1 + ldp x2, x3, [sp, #136] + sbcs x2, x13, x2 + sbcs x3, x12, x3 + ldr x4, [sp, #152] // 8-byte Folded Reload + sbcs x4, x11, x4 + ldp x5, x6, [sp, #168] + sbcs x5, x10, x5 + sbcs x6, x9, x6 + ldr x7, [sp, #184] // 8-byte Folded Reload + sbcs x7, x8, x7 + sbcs x17, x17, xzr + tst x17, #0x1 + csel x14, x14, x18, ne + csel x15, x15, x0, ne + csel x16, x16, x1, ne + csel x13, x13, x2, ne + csel x12, x12, x3, ne + csel x11, x11, x4, ne + csel x10, x10, x5, ne + csel x9, x9, x6, ne + csel x8, x8, x7, ne + ldr x17, [sp, #160] // 8-byte Folded Reload + stp x14, x15, [x17] + stp x16, x13, [x17, #16] + stp x12, x11, [x17, #32] + stp x10, x9, [x17, #48] + str x8, [x17, #64] + sub sp, x29, #80 // =80 + ldp x29, x30, [sp, #80] + ldp x20, x19, [sp, #64] + ldp x22, x21, [sp, #48] + ldp x24, x23, [sp, #32] + ldp x26, x25, [sp, #16] + ldp x28, x27, [sp], #96 + ret +.Lfunc_end132: + .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L + + .globl mcl_fp_addPre9L + .align 2 + .type mcl_fp_addPre9L,@function +mcl_fp_addPre9L: // @mcl_fp_addPre9L +// BB#0: + ldp x11, x8, [x2, #56] + ldp x13, x9, [x1, #56] + ldp x15, x10, [x2, #40] + ldp x17, x12, [x1, #40] + ldp x3, x14, [x2, #24] + ldr x4, [x2] + ldp x2, x18, [x2, #8] + ldp x5, x6, [x1] + ldr x7, [x1, #16] + ldp x1, x16, [x1, #24] + adds x4, x4, x5 + adcs x2, x2, x6 + stp x4, x2, [x0] + adcs x18, x18, x7 + str x18, [x0, #16] + adcs x18, x3, x1 + adcs x14, x14, x16 + stp x18, x14, [x0, #24] + adcs x14, x15, x17 + adcs x10, x10, x12 + stp x14, x10, [x0, #40] + adcs x10, x11, x13 + adcs x9, x8, x9 + adcs x8, xzr, xzr + stp x10, x9, [x0, #56] + mov x0, x8 + ret +.Lfunc_end133: + .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L + + .globl mcl_fp_subPre9L + .align 2 + .type mcl_fp_subPre9L,@function +mcl_fp_subPre9L: // @mcl_fp_subPre9L +// BB#0: + ldp x11, x8, [x2, #56] + ldp x13, x9, [x1, #56] + ldp x15, x10, [x2, #40] + ldp x17, x12, [x1, #40] + ldp x3, x14, [x2, #24] + ldr x4, [x2] + ldp x2, x18, [x2, #8] + ldp x5, x6, [x1] + ldr x7, [x1, #16] + ldp x1, x16, [x1, #24] + subs x4, x5, x4 + sbcs x2, x6, x2 + stp x4, x2, [x0] + sbcs x18, x7, x18 + str x18, [x0, #16] + sbcs x18, x1, x3 + sbcs x14, x16, x14 + stp x18, x14, [x0, #24] + sbcs x14, x17, x15 + sbcs x10, x12, x10 + stp x14, x10, [x0, #40] + sbcs x10, x13, x11 + sbcs x9, x9, x8 + ngcs x8, xzr + and x8, x8, #0x1 + stp x10, x9, [x0, #56] + mov x0, x8 + ret +.Lfunc_end134: + .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L + + .globl mcl_fp_shr1_9L + .align 2 + .type mcl_fp_shr1_9L,@function +mcl_fp_shr1_9L: // @mcl_fp_shr1_9L +// BB#0: + ldp x8, x9, [x1] + ldp x12, x10, [x1, #56] + ldp x16, x11, [x1, #40] + ldp x13, x14, [x1, #16] + ldr x15, [x1, #32] + extr x8, x9, x8, #1 + extr x9, x13, x9, #1 + extr x13, x14, x13, #1 + extr x14, x15, x14, #1 + extr x15, x16, x15, #1 + extr x16, x11, x16, #1 + extr x11, x12, x11, #1 + extr x12, x10, x12, #1 + lsr x10, x10, #1 + stp x8, x9, [x0] + stp x13, x14, [x0, #16] + stp x15, x16, 
[x0, #32] + stp x11, x12, [x0, #48] + str x10, [x0, #64] + ret +.Lfunc_end135: + .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L + + .globl mcl_fp_add9L + .align 2 + .type mcl_fp_add9L,@function +mcl_fp_add9L: // @mcl_fp_add9L +// BB#0: + stp x24, x23, [sp, #-48]! + stp x22, x21, [sp, #16] + stp x20, x19, [sp, #32] + ldp x11, x8, [x2, #56] + ldp x13, x9, [x1, #56] + ldp x15, x10, [x2, #40] + ldp x17, x12, [x1, #40] + ldp x4, x14, [x2, #24] + ldr x5, [x2] + ldp x2, x18, [x2, #8] + ldp x6, x7, [x1] + ldr x19, [x1, #16] + ldp x1, x16, [x1, #24] + adds x5, x5, x6 + adcs x2, x2, x7 + adcs x18, x18, x19 + ldp x21, x7, [x3, #40] + ldp x19, x6, [x3, #56] + adcs x1, x4, x1 + adcs x4, x14, x16 + ldr x20, [x3, #32] + adcs x17, x15, x17 + adcs x10, x10, x12 + ldp x12, x14, [x3] + stp x5, x2, [x0] + stp x18, x1, [x0, #16] + stp x4, x17, [x0, #32] + adcs x22, x11, x13 + stp x10, x22, [x0, #48] + adcs x8, x8, x9 + str x8, [x0, #64] + adcs x23, xzr, xzr + ldp x9, x11, [x3, #16] + subs x16, x5, x12 + sbcs x15, x2, x14 + sbcs x14, x18, x9 + sbcs x13, x1, x11 + sbcs x12, x4, x20 + sbcs x11, x17, x21 + sbcs x10, x10, x7 + sbcs x9, x22, x19 + sbcs x8, x8, x6 + sbcs x17, x23, xzr + and w17, w17, #0x1 + tbnz w17, #0, .LBB136_2 +// BB#1: // %nocarry + stp x16, x15, [x0] + stp x14, x13, [x0, #16] + stp x12, x11, [x0, #32] + stp x10, x9, [x0, #48] + str x8, [x0, #64] +.LBB136_2: // %carry + ldp x20, x19, [sp, #32] + ldp x22, x21, [sp, #16] + ldp x24, x23, [sp], #48 + ret +.Lfunc_end136: + .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L + + .globl mcl_fp_addNF9L + .align 2 + .type mcl_fp_addNF9L,@function +mcl_fp_addNF9L: // @mcl_fp_addNF9L +// BB#0: + stp x20, x19, [sp, #-16]! + ldp x11, x8, [x1, #56] + ldp x13, x9, [x2, #56] + ldp x15, x10, [x1, #40] + ldp x17, x12, [x2, #40] + ldp x4, x14, [x1, #24] + ldr x5, [x1] + ldp x1, x18, [x1, #8] + ldp x6, x7, [x2] + ldr x19, [x2, #16] + ldp x2, x16, [x2, #24] + adds x5, x6, x5 + adcs x1, x7, x1 + adcs x18, x19, x18 + ldp x19, x6, [x3, #56] + adcs x2, x2, x4 + adcs x14, x16, x14 + ldp x4, x7, [x3, #40] + adcs x15, x17, x15 + adcs x10, x12, x10 + ldp x12, x17, [x3] + adcs x11, x13, x11 + ldr x13, [x3, #16] + ldp x3, x16, [x3, #24] + adcs x8, x9, x8 + subs x9, x5, x12 + sbcs x12, x1, x17 + sbcs x13, x18, x13 + sbcs x17, x2, x3 + sbcs x16, x14, x16 + sbcs x3, x15, x4 + sbcs x4, x10, x7 + sbcs x7, x11, x19 + sbcs x6, x8, x6 + asr x19, x6, #63 + cmp x19, #0 // =0 + csel x9, x5, x9, lt + csel x12, x1, x12, lt + csel x13, x18, x13, lt + csel x17, x2, x17, lt + csel x14, x14, x16, lt + csel x15, x15, x3, lt + csel x10, x10, x4, lt + csel x11, x11, x7, lt + csel x8, x8, x6, lt + stp x9, x12, [x0] + stp x13, x17, [x0, #16] + stp x14, x15, [x0, #32] + stp x10, x11, [x0, #48] + str x8, [x0, #64] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end137: + .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L + + .globl mcl_fp_sub9L + .align 2 + .type mcl_fp_sub9L,@function +mcl_fp_sub9L: // @mcl_fp_sub9L +// BB#0: + stp x20, x19, [sp, #-16]! 
+ ldp x15, x16, [x2, #56] + ldp x4, x17, [x1, #56] + ldp x13, x14, [x2, #40] + ldp x6, x18, [x1, #40] + ldp x11, x12, [x2, #24] + ldp x9, x10, [x2, #8] + ldr x8, [x2] + ldp x2, x7, [x1] + ldr x19, [x1, #16] + ldp x1, x5, [x1, #24] + subs x8, x2, x8 + sbcs x9, x7, x9 + stp x8, x9, [x0] + sbcs x10, x19, x10 + sbcs x11, x1, x11 + stp x10, x11, [x0, #16] + sbcs x12, x5, x12 + sbcs x13, x6, x13 + stp x12, x13, [x0, #32] + sbcs x14, x18, x14 + sbcs x15, x4, x15 + stp x14, x15, [x0, #48] + sbcs x16, x17, x16 + str x16, [x0, #64] + ngcs x17, xzr + and w17, w17, #0x1 + tbnz w17, #0, .LBB138_2 +// BB#1: // %nocarry + ldp x20, x19, [sp], #16 + ret +.LBB138_2: // %carry + ldp x18, x1, [x3] + ldp x2, x4, [x3, #16] + ldp x5, x6, [x3, #32] + adds x8, x18, x8 + adcs x9, x1, x9 + ldr x18, [x3, #48] + ldp x1, x17, [x3, #56] + adcs x10, x2, x10 + adcs x11, x4, x11 + adcs x12, x5, x12 + adcs x13, x6, x13 + adcs x14, x18, x14 + adcs x15, x1, x15 + adcs x16, x17, x16 + stp x8, x9, [x0] + stp x10, x11, [x0, #16] + stp x12, x13, [x0, #32] + stp x14, x15, [x0, #48] + str x16, [x0, #64] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end138: + .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L + + .globl mcl_fp_subNF9L + .align 2 + .type mcl_fp_subNF9L,@function +mcl_fp_subNF9L: // @mcl_fp_subNF9L +// BB#0: + stp x20, x19, [sp, #-16]! + ldp x11, x8, [x2, #56] + ldp x13, x9, [x1, #56] + ldp x15, x10, [x2, #40] + ldp x17, x12, [x1, #40] + ldp x4, x14, [x2, #24] + ldr x5, [x2] + ldp x2, x18, [x2, #8] + ldp x6, x7, [x1] + ldr x19, [x1, #16] + ldp x1, x16, [x1, #24] + subs x5, x6, x5 + sbcs x2, x7, x2 + sbcs x18, x19, x18 + ldp x19, x6, [x3, #56] + sbcs x1, x1, x4 + sbcs x14, x16, x14 + ldp x4, x7, [x3, #40] + sbcs x15, x17, x15 + sbcs x10, x12, x10 + ldp x12, x17, [x3] + sbcs x11, x13, x11 + sbcs x8, x9, x8 + asr x9, x8, #63 + extr x13, x9, x8, #63 + and x12, x13, x12 + ldr x13, [x3, #16] + ldp x3, x16, [x3, #24] + and x19, x9, x19 + and x6, x9, x6 + ror x9, x9, #63 + and x17, x9, x17 + and x13, x9, x13 + and x3, x9, x3 + and x16, x9, x16 + and x4, x9, x4 + and x9, x9, x7 + adds x12, x12, x5 + str x12, [x0] + adcs x12, x17, x2 + str x12, [x0, #8] + adcs x12, x13, x18 + str x12, [x0, #16] + adcs x12, x3, x1 + str x12, [x0, #24] + adcs x12, x16, x14 + str x12, [x0, #32] + adcs x12, x4, x15 + adcs x9, x9, x10 + stp x12, x9, [x0, #40] + adcs x9, x19, x11 + adcs x8, x6, x8 + stp x9, x8, [x0, #56] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end139: + .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L + + .globl mcl_fpDbl_add9L + .align 2 + .type mcl_fpDbl_add9L,@function +mcl_fpDbl_add9L: // @mcl_fpDbl_add9L +// BB#0: + stp x20, x19, [sp, #-16]! 
+ ldp x10, x8, [x2, #128] + ldp x11, x9, [x1, #128] + ldp x12, x13, [x2, #112] + ldp x14, x15, [x1, #112] + ldp x16, x17, [x2, #96] + ldp x18, x4, [x2] + ldp x5, x6, [x1] + ldp x7, x19, [x2, #16] + adds x18, x18, x5 + adcs x4, x4, x6 + ldp x5, x6, [x1, #16] + str x18, [x0] + adcs x18, x7, x5 + ldp x5, x7, [x1, #96] + str x4, [x0, #8] + ldr x4, [x1, #32] + str x18, [x0, #16] + adcs x18, x19, x6 + ldp x6, x19, [x2, #32] + str x18, [x0, #24] + adcs x4, x6, x4 + ldp x18, x6, [x1, #40] + str x4, [x0, #32] + adcs x18, x19, x18 + ldp x4, x19, [x2, #48] + str x18, [x0, #40] + adcs x4, x4, x6 + ldp x18, x6, [x1, #56] + str x4, [x0, #48] + adcs x18, x19, x18 + ldp x4, x19, [x2, #64] + str x18, [x0, #56] + ldr x18, [x1, #72] + adcs x4, x4, x6 + ldp x6, x2, [x2, #80] + str x4, [x0, #64] + ldp x4, x1, [x1, #80] + adcs x18, x19, x18 + adcs x4, x6, x4 + adcs x1, x2, x1 + ldp x6, x19, [x3, #56] + adcs x16, x16, x5 + adcs x17, x17, x7 + ldp x7, x2, [x3, #40] + adcs x12, x12, x14 + adcs x13, x13, x15 + ldp x15, x5, [x3, #24] + adcs x10, x10, x11 + ldr x11, [x3] + ldp x3, x14, [x3, #8] + adcs x8, x8, x9 + adcs x9, xzr, xzr + subs x11, x18, x11 + sbcs x3, x4, x3 + sbcs x14, x1, x14 + sbcs x15, x16, x15 + sbcs x5, x17, x5 + sbcs x7, x12, x7 + sbcs x2, x13, x2 + sbcs x6, x10, x6 + sbcs x19, x8, x19 + sbcs x9, x9, xzr + tst x9, #0x1 + csel x9, x18, x11, ne + csel x11, x4, x3, ne + csel x14, x1, x14, ne + csel x15, x16, x15, ne + csel x16, x17, x5, ne + csel x12, x12, x7, ne + csel x13, x13, x2, ne + csel x10, x10, x6, ne + csel x8, x8, x19, ne + stp x9, x11, [x0, #72] + stp x14, x15, [x0, #88] + stp x16, x12, [x0, #104] + stp x13, x10, [x0, #120] + str x8, [x0, #136] + ldp x20, x19, [sp], #16 + ret +.Lfunc_end140: + .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L + + .globl mcl_fpDbl_sub9L + .align 2 + .type mcl_fpDbl_sub9L,@function +mcl_fpDbl_sub9L: // @mcl_fpDbl_sub9L +// BB#0: + ldp x10, x8, [x2, #128] + ldp x11, x9, [x1, #128] + ldp x14, x12, [x2, #112] + ldp x15, x13, [x1, #112] + ldp x16, x17, [x2] + ldp x18, x4, [x1] + ldp x5, x6, [x2, #96] + ldr x7, [x1, #16] + subs x16, x18, x16 + sbcs x17, x4, x17 + ldp x18, x4, [x2, #16] + str x16, [x0] + ldr x16, [x1, #24] + sbcs x18, x7, x18 + str x17, [x0, #8] + ldp x17, x7, [x2, #32] + str x18, [x0, #16] + sbcs x16, x16, x4 + ldp x18, x4, [x1, #32] + str x16, [x0, #24] + sbcs x16, x18, x17 + ldp x17, x18, [x2, #48] + str x16, [x0, #32] + sbcs x4, x4, x7 + ldp x16, x7, [x1, #48] + str x4, [x0, #40] + sbcs x16, x16, x17 + ldp x17, x4, [x2, #80] + str x16, [x0, #48] + ldr x16, [x1, #64] + sbcs x18, x7, x18 + ldp x7, x2, [x2, #64] + str x18, [x0, #56] + ldr x18, [x1, #72] + sbcs x16, x16, x7 + str x16, [x0, #64] + ldp x16, x7, [x1, #80] + sbcs x18, x18, x2 + ldp x2, x1, [x1, #96] + sbcs x16, x16, x17 + sbcs x4, x7, x4 + sbcs x2, x2, x5 + ldp x7, x17, [x3, #56] + sbcs x1, x1, x6 + sbcs x14, x15, x14 + ldp x6, x5, [x3, #40] + sbcs x12, x13, x12 + sbcs x10, x11, x10 + ldp x13, x15, [x3, #24] + sbcs x8, x9, x8 + ngcs x9, xzr + tst x9, #0x1 + ldr x9, [x3] + ldp x3, x11, [x3, #8] + csel x17, x17, xzr, ne + csel x7, x7, xzr, ne + csel x5, x5, xzr, ne + csel x6, x6, xzr, ne + csel x15, x15, xzr, ne + csel x13, x13, xzr, ne + csel x11, x11, xzr, ne + csel x3, x3, xzr, ne + csel x9, x9, xzr, ne + adds x9, x9, x18 + str x9, [x0, #72] + adcs x9, x3, x16 + str x9, [x0, #80] + adcs x9, x11, x4 + str x9, [x0, #88] + adcs x9, x13, x2 + str x9, [x0, #96] + adcs x9, x15, x1 + str x9, [x0, #104] + adcs x9, x6, x14 + str x9, [x0, #112] + adcs x9, x5, x12 + str x9, [x0, #120] + adcs 
x9, x7, x10 + adcs x8, x17, x8 + stp x9, x8, [x0, #128] + ret +.Lfunc_end141: + .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L + + + .section ".note.GNU-stack","",@progbits diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/arm.s b/vendor/github.com/dexon-foundation/mcl/src/asm/arm.s new file mode 100644 index 000000000..2df9bfb92 --- /dev/null +++ b/vendor/github.com/dexon-foundation/mcl/src/asm/arm.s @@ -0,0 +1,84189 @@ + .text + .syntax unified + .eabi_attribute 67, "2.09" @ Tag_conformance + .eabi_attribute 6, 1 @ Tag_CPU_arch + .eabi_attribute 8, 1 @ Tag_ARM_ISA_use + .eabi_attribute 15, 1 @ Tag_ABI_PCS_RW_data + .eabi_attribute 16, 1 @ Tag_ABI_PCS_RO_data + .eabi_attribute 17, 2 @ Tag_ABI_PCS_GOT_use + .eabi_attribute 20, 1 @ Tag_ABI_FP_denormal + .eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions + .eabi_attribute 23, 3 @ Tag_ABI_FP_number_model + .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access + .eabi_attribute 24, 1 @ Tag_ABI_align_needed + .eabi_attribute 25, 1 @ Tag_ABI_align_preserved + .eabi_attribute 28, 1 @ Tag_ABI_VFP_args + .eabi_attribute 38, 1 @ Tag_ABI_FP_16bit_format + .eabi_attribute 14, 0 @ Tag_ABI_PCS_R9_use + .file "<stdin>" + .globl makeNIST_P192L + .align 2 + .type makeNIST_P192L,%function +makeNIST_P192L: @ @makeNIST_P192L + .fnstart +@ BB#0: + mvn r1, #0 + mvn r2, #1 + str r1, [r0] + stmib r0, {r1, r2} + str r1, [r0, #12] + str r1, [r0, #16] + str r1, [r0, #20] + mov pc, lr +.Lfunc_end0: + .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L + .cantunwind + .fnend + + .globl mcl_fpDbl_mod_NIST_P192L + .align 2 + .type mcl_fpDbl_mod_NIST_P192L,%function +mcl_fpDbl_mod_NIST_P192L: @ @mcl_fpDbl_mod_NIST_P192L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #8 + sub sp, sp, #8 + add lr, r1, #24 + ldr r2, [r1, #40] + ldr r3, [r1, #44] + ldr r7, [r1, #16] + ldr r8, [r1, #20] + ldm lr, {r4, r5, r6, lr} + ldm r1, {r1, r9, r10, r12} + adds r11, r4, r1 + adcs r9, r5, r9 + adcs r10, r6, r10 + adcs r1, lr, r12 + str r1, [sp, #4] @ 4-byte Spill + adcs r1, r2, r7 + mov r7, #0 + str r1, [sp] @ 4-byte Spill + adcs r8, r3, r8 + mov r1, #0 + adcs r1, r1, #0 + adc r12, r7, #0 + ldr r7, [sp, #4] @ 4-byte Reload + adds r11, r11, r2 + adcs r9, r9, r3 + adcs r4, r10, r4 + adcs r5, r7, r5 + ldr r7, [sp] @ 4-byte Reload + adcs r6, r7, r6 + adcs r7, r8, lr + adcs r1, r1, #0 + adc r12, r12, #0 + adds lr, r4, r2 + adcs r3, r5, r3 + adcs r6, r6, #0 + adcs r7, r7, #0 + adcs r1, r1, #0 + adc r5, r12, #0 + adds r12, r1, r11 + adcs r11, r5, r9 + adcs r10, r1, lr + mov r1, #0 + adcs r8, r5, r3 + adcs lr, r6, #0 + adcs r2, r7, #0 + adc r9, r1, #0 + adds r7, r12, #1 + str r2, [sp, #4] @ 4-byte Spill + adcs r6, r11, #0 + adcs r3, r10, #1 + adcs r5, r8, #0 + adcs r1, lr, #0 + adcs r2, r2, #0 + sbc r4, r9, #0 + ands r4, r4, #1 + movne r7, r12 + movne r6, r11 + movne r3, r10 + cmp r4, #0 + movne r5, r8 + movne r1, lr + str r7, [r0] + str r6, [r0, #4] + str r3, [r0, #8] + str r5, [r0, #12] + str r1, [r0, #16] + ldr r1, [sp, #4] @ 4-byte Reload + movne r2, r1 + str r2, [r0, #20] + add sp, sp, #8 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end1: + .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L + .cantunwind + .fnend + + .globl mcl_fp_sqr_NIST_P192L + .align 2 + .type mcl_fp_sqr_NIST_P192L,%function +mcl_fp_sqr_NIST_P192L: @ @mcl_fp_sqr_NIST_P192L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #60 + sub sp, sp, 
#60 + mov r8, r0 + add r0, sp, #12 + bl mcl_fpDbl_sqrPre6L(PLT) + add r12, sp, #12 + ldr lr, [sp, #48] + ldr r2, [sp, #44] + ldr r3, [sp, #40] + mov r4, #0 + ldm r12, {r0, r1, r5, r6, r12} + ldr r7, [sp, #36] + adds r0, r7, r0 + str r0, [sp, #8] @ 4-byte Spill + adcs r0, r3, r1 + mov r1, #0 + adcs r10, r2, r5 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #52] + ldr r5, [sp, #32] + adcs r11, lr, r6 + ldr r6, [sp, #56] + adcs r9, r0, r12 + adcs r5, r6, r5 + adcs r1, r1, #0 + adc r12, r4, #0 + ldr r4, [sp, #8] @ 4-byte Reload + adds r4, r4, r0 + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [sp, #4] @ 4-byte Reload + adcs r4, r4, r6 + adcs r7, r10, r7 + adcs r3, r11, r3 + adcs r2, r9, r2 + adcs r5, r5, lr + adcs r1, r1, #0 + adc r12, r12, #0 + adds lr, r7, r0 + ldr r0, [sp, #8] @ 4-byte Reload + adcs r3, r3, r6 + adcs r2, r2, #0 + adcs r7, r5, #0 + adcs r1, r1, #0 + adc r6, r12, #0 + adds r5, r1, r0 + mov r0, #0 + adcs r11, r6, r4 + adcs r10, r1, lr + adcs r12, r6, r3 + adcs lr, r2, #0 + adcs r4, r7, #0 + adc r9, r0, #0 + adds r7, r5, #1 + str r4, [sp, #8] @ 4-byte Spill + adcs r2, r11, #0 + adcs r3, r10, #1 + adcs r6, r12, #0 + adcs r1, lr, #0 + adcs r0, r4, #0 + sbc r4, r9, #0 + ands r4, r4, #1 + movne r7, r5 + movne r2, r11 + movne r3, r10 + cmp r4, #0 + movne r6, r12 + movne r1, lr + str r7, [r8] + str r2, [r8, #4] + str r3, [r8, #8] + str r6, [r8, #12] + str r1, [r8, #16] + ldr r1, [sp, #8] @ 4-byte Reload + movne r0, r1 + str r0, [r8, #20] + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end2: + .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L + .cantunwind + .fnend + + .globl mcl_fp_mulNIST_P192L + .align 2 + .type mcl_fp_mulNIST_P192L,%function +mcl_fp_mulNIST_P192L: @ @mcl_fp_mulNIST_P192L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #60 + sub sp, sp, #60 + mov r8, r0 + add r0, sp, #12 + bl mcl_fpDbl_mulPre6L(PLT) + add r12, sp, #12 + ldr lr, [sp, #48] + ldr r2, [sp, #44] + ldr r3, [sp, #40] + mov r4, #0 + ldm r12, {r0, r1, r5, r6, r12} + ldr r7, [sp, #36] + adds r0, r7, r0 + str r0, [sp, #8] @ 4-byte Spill + adcs r0, r3, r1 + mov r1, #0 + adcs r10, r2, r5 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #52] + ldr r5, [sp, #32] + adcs r11, lr, r6 + ldr r6, [sp, #56] + adcs r9, r0, r12 + adcs r5, r6, r5 + adcs r1, r1, #0 + adc r12, r4, #0 + ldr r4, [sp, #8] @ 4-byte Reload + adds r4, r4, r0 + str r4, [sp, #8] @ 4-byte Spill + ldr r4, [sp, #4] @ 4-byte Reload + adcs r4, r4, r6 + adcs r7, r10, r7 + adcs r3, r11, r3 + adcs r2, r9, r2 + adcs r5, r5, lr + adcs r1, r1, #0 + adc r12, r12, #0 + adds lr, r7, r0 + ldr r0, [sp, #8] @ 4-byte Reload + adcs r3, r3, r6 + adcs r2, r2, #0 + adcs r7, r5, #0 + adcs r1, r1, #0 + adc r6, r12, #0 + adds r5, r1, r0 + mov r0, #0 + adcs r11, r6, r4 + adcs r10, r1, lr + adcs r12, r6, r3 + adcs lr, r2, #0 + adcs r4, r7, #0 + adc r9, r0, #0 + adds r7, r5, #1 + str r4, [sp, #8] @ 4-byte Spill + adcs r2, r11, #0 + adcs r3, r10, #1 + adcs r6, r12, #0 + adcs r1, lr, #0 + adcs r0, r4, #0 + sbc r4, r9, #0 + ands r4, r4, #1 + movne r7, r5 + movne r2, r11 + movne r3, r10 + cmp r4, #0 + movne r6, r12 + movne r1, lr + str r7, [r8] + str r2, [r8, #4] + str r3, [r8, #8] + str r6, [r8, #12] + str r1, [r8, #16] + ldr r1, [sp, #8] @ 4-byte Reload + movne r0, r1 + str r0, [r8, #20] + add sp, sp, #60 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end3: + .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L + .cantunwind + .fnend + 
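+ @ Summary of the NIST special-prime reduction used by the routines in this
+ @ file (descriptive sketch only; the surrounding code is compiler-generated
+ @ and follows this arithmetic only approximately):
+ @   p192 = 2^192 - 2^64 - 1, so 2^192 == 2^64 + 1 (mod p192); a 384-bit
+ @   product x = hi*2^192 + lo therefore folds as roughly
+ @       r = lo + hi + (hi << 64)   (mod p192)
+ @   followed by the conditional subtractions carried out with the movne
+ @   sequences in mcl_fpDbl_mod_NIST_P192L above.
+ @   p521 = 2^521 - 1 (see mcl_fpDbl_mod_NIST_P521L below), so roughly
+ @       x mod p521 == ((x & (2^521 - 1)) + (x >> 521)) mod p521
+ @   with a final conditional correction; the lsr #9 / lsl #23 pairs
+ @   reassemble x >> 521 from 32-bit limbs (521 mod 32 == 9).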
+ .globl mcl_fpDbl_mod_NIST_P521L + .align 2 + .type mcl_fpDbl_mod_NIST_P521L,%function +mcl_fpDbl_mod_NIST_P521L: @ @mcl_fpDbl_mod_NIST_P521L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #44 + sub sp, sp, #44 + ldr r6, [r1, #64] + mov r5, #255 + ldr r3, [r1, #72] + ldr r2, [r1, #76] + mov r9, r0 + orr r5, r5, #256 + and r5, r6, r5 + lsr r6, r6, #9 + lsr r7, r3, #9 + str r5, [sp, #40] @ 4-byte Spill + ldr r5, [r1, #68] + orr r12, r7, r2, lsl #23 + lsr r2, r2, #9 + lsr r4, r5, #9 + orr r6, r6, r5, lsl #23 + ldr r5, [r1] + orr r3, r4, r3, lsl #23 + ldmib r1, {r4, r7, lr} + adds r5, r6, r5 + ldr r6, [r1, #36] + str r5, [sp, #36] @ 4-byte Spill + ldr r5, [r1, #80] + adcs r3, r3, r4 + str r3, [sp, #32] @ 4-byte Spill + adcs r7, r12, r7 + ldr r3, [r1, #84] + str r7, [sp, #28] @ 4-byte Spill + ldr r7, [r1, #88] + orr r2, r2, r5, lsl #23 + lsr r5, r5, #9 + adcs r12, r2, lr + ldr r2, [r1, #16] + orr r4, r5, r3, lsl #23 + lsr r3, r3, #9 + orr r3, r3, r7, lsl #23 + lsr r5, r7, #9 + ldr r7, [r1, #40] + adcs r2, r4, r2 + ldr r4, [r1, #24] + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [r1, #20] + adcs r2, r3, r2 + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [r1, #92] + orr r3, r5, r2, lsl #23 + ldr r5, [r1, #28] + lsr r2, r2, #9 + adcs lr, r3, r4 + ldr r3, [r1, #96] + ldr r4, [r1, #44] + orr r2, r2, r3, lsl #23 + adcs r2, r2, r5 + ldr r5, [r1, #32] + str r2, [sp, #16] @ 4-byte Spill + lsr r2, r3, #9 + ldr r3, [r1, #100] + orr r2, r2, r3, lsl #23 + adcs r2, r2, r5 + ldr r5, [r1, #48] + str r2, [sp, #12] @ 4-byte Spill + lsr r2, r3, #9 + ldr r3, [r1, #104] + orr r2, r2, r3, lsl #23 + adcs r0, r2, r6 + lsr r2, r3, #9 + ldr r3, [r1, #108] + ldr r6, [r1, #52] + str r0, [sp, #8] @ 4-byte Spill + orr r2, r2, r3, lsl #23 + adcs r7, r2, r7 + lsr r2, r3, #9 + ldr r3, [r1, #112] + orr r2, r2, r3, lsl #23 + lsr r3, r3, #9 + adcs r2, r2, r4 + ldr r4, [r1, #116] + orr r3, r3, r4, lsl #23 + lsr r4, r4, #9 + adcs r3, r3, r5 + ldr r5, [r1, #120] + orr r4, r4, r5, lsl #23 + adcs r11, r4, r6 + lsr r4, r5, #9 + ldr r5, [r1, #124] + ldr r6, [r1, #56] + orr r4, r4, r5, lsl #23 + adcs r10, r4, r6 + lsr r4, r5, #9 + ldr r5, [r1, #128] + ldr r1, [r1, #60] + orr r4, r4, r5, lsl #23 + adcs r8, r4, r1 + ldr r4, [sp, #40] @ 4-byte Reload + lsr r1, r5, #9 + ldr r5, [sp, #36] @ 4-byte Reload + adc r1, r1, r4 + mov r4, #1 + and r4, r4, r1, lsr #9 + adds r5, r4, r5 + ldr r4, [sp, #32] @ 4-byte Reload + str r5, [sp, #40] @ 4-byte Spill + adcs r6, r4, #0 + ldr r4, [sp, #28] @ 4-byte Reload + str r6, [sp, #36] @ 4-byte Spill + adcs r0, r4, #0 + and r4, r6, r5 + ldr r5, [sp, #24] @ 4-byte Reload + str r0, [sp, #32] @ 4-byte Spill + and r4, r4, r0 + adcs r0, r12, #0 + str r0, [sp, #28] @ 4-byte Spill + and r6, r4, r0 + adcs r0, r5, #0 + and r4, r6, r0 + ldr r6, [sp, #20] @ 4-byte Reload + str r0, [sp, #24] @ 4-byte Spill + adcs r0, r6, #0 + ldr r6, [sp, #16] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + and r5, r4, r0 + adcs r0, lr, #0 + and r5, r5, r0 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs lr, r6, #0 + and r6, r5, lr + ldr r5, [sp, #12] @ 4-byte Reload + adcs r5, r5, #0 + and r12, r6, r5 + adcs r6, r0, #0 + adcs r7, r7, #0 + and r4, r12, r6 + adcs r2, r2, #0 + and r4, r4, r7 + adcs r3, r3, #0 + and r4, r4, r2 + adcs r0, r11, #0 + and r4, r4, r3 + adcs r10, r10, #0 + and r4, r4, r0 + adcs r11, r8, #0 + and r4, r4, r10 + adc r8, r1, #0 + ldr r1, .LCPI4_0 + and r4, r4, r11 + orr r1, r8, r1 + and r1, r4, r1 + cmn r1, #1 + beq 
.LBB4_2 +@ BB#1: @ %nonzero + ldr r1, [sp, #40] @ 4-byte Reload + str r1, [r9] + ldr r1, [sp, #36] @ 4-byte Reload + str r1, [r9, #4] + ldr r1, [sp, #32] @ 4-byte Reload + str r1, [r9, #8] + ldr r1, [sp, #28] @ 4-byte Reload + str r1, [r9, #12] + ldr r1, [sp, #24] @ 4-byte Reload + str r1, [r9, #16] + ldr r1, [sp, #20] @ 4-byte Reload + str r1, [r9, #20] + ldr r1, [sp, #4] @ 4-byte Reload + str r1, [r9, #24] + add r1, r9, #32 + str lr, [r9, #28] + stm r1, {r5, r6, r7} + add r1, r9, #52 + str r2, [r9, #44] + str r3, [r9, #48] + stm r1, {r0, r10, r11} + mov r1, #255 + orr r1, r1, #256 + and r1, r8, r1 + str r1, [r9, #64] + b .LBB4_3 +.LBB4_2: @ %zero + mov r0, r9 + mov r1, #0 + mov r2, #68 + bl memset(PLT) +.LBB4_3: @ %zero + add sp, sp, #44 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr + .align 2 +@ BB#4: +.LCPI4_0: + .long 4294966784 @ 0xfffffe00 +.Lfunc_end4: + .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre1L + .align 2 + .type mcl_fp_mulUnitPre1L,%function +mcl_fp_mulUnitPre1L: @ @mcl_fp_mulUnitPre1L + .fnstart +@ BB#0: + ldr r1, [r1] + umull r3, r12, r1, r2 + stm r0, {r3, r12} + mov pc, lr +.Lfunc_end5: + .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre1L + .align 2 + .type mcl_fpDbl_mulPre1L,%function +mcl_fpDbl_mulPre1L: @ @mcl_fpDbl_mulPre1L + .fnstart +@ BB#0: + ldr r1, [r1] + ldr r2, [r2] + umull r3, r12, r2, r1 + stm r0, {r3, r12} + mov pc, lr +.Lfunc_end6: + .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre1L + .align 2 + .type mcl_fpDbl_sqrPre1L,%function +mcl_fpDbl_sqrPre1L: @ @mcl_fpDbl_sqrPre1L + .fnstart +@ BB#0: + ldr r1, [r1] + umull r2, r3, r1, r1 + stm r0, {r2, r3} + mov pc, lr +.Lfunc_end7: + .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L + .cantunwind + .fnend + + .globl mcl_fp_mont1L + .align 2 + .type mcl_fp_mont1L,%function +mcl_fp_mont1L: @ @mcl_fp_mont1L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldr r12, [r2] + ldr r1, [r1] + mov r6, #0 + umull lr, r2, r1, r12 + ldr r12, [r3, #-4] + ldr r3, [r3] + mul r1, lr, r12 + umull r12, r4, r1, r3 + adds r5, r12, lr + adcs r5, r4, r2 + umlal lr, r2, r1, r3 + adc r6, r6, #0 + subs r1, r2, r3 + sbc r3, r6, #0 + tst r3, #1 + movne r1, r2 + str r1, [r0] + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end8: + .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L + .cantunwind + .fnend + + .globl mcl_fp_montNF1L + .align 2 + .type mcl_fp_montNF1L,%function +mcl_fp_montNF1L: @ @mcl_fp_montNF1L + .fnstart +@ BB#0: + .save {r11, lr} + push {r11, lr} + ldr r12, [r2] + ldr r1, [r1] + umull lr, r2, r1, r12 + ldr r12, [r3, #-4] + ldr r3, [r3] + mul r1, lr, r12 + umlal lr, r2, r1, r3 + sub r1, r2, r3 + cmp r1, #0 + movge r2, r1 + str r2, [r0] + pop {r11, lr} + mov pc, lr +.Lfunc_end9: + .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L + .cantunwind + .fnend + + .globl mcl_fp_montRed1L + .align 2 + .type mcl_fp_montRed1L,%function +mcl_fp_montRed1L: @ @mcl_fp_montRed1L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldr r12, [r2, #-4] + ldr r3, [r1] + ldr r2, [r2] + ldr r1, [r1, #4] + mov r6, #0 + mul lr, r3, r12 + umull r12, r4, lr, r2 + adds r5, r3, r12 + adcs r5, r1, r4 + umlal r3, r1, lr, r2 + adc r6, r6, #0 + subs r2, r1, r2 + sbc r3, r6, #0 + tst r3, #1 + movne r2, r1 + str r2, [r0] + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end10: + .size mcl_fp_montRed1L, 
.Lfunc_end10-mcl_fp_montRed1L + .cantunwind + .fnend + + .globl mcl_fp_addPre1L + .align 2 + .type mcl_fp_addPre1L,%function +mcl_fp_addPre1L: @ @mcl_fp_addPre1L + .fnstart +@ BB#0: + ldr r1, [r1] + ldr r2, [r2] + adds r1, r2, r1 + str r1, [r0] + mov r0, #0 + adc r0, r0, #0 + mov pc, lr +.Lfunc_end11: + .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L + .cantunwind + .fnend + + .globl mcl_fp_subPre1L + .align 2 + .type mcl_fp_subPre1L,%function +mcl_fp_subPre1L: @ @mcl_fp_subPre1L + .fnstart +@ BB#0: + ldr r2, [r2] + ldr r1, [r1] + subs r1, r1, r2 + str r1, [r0] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + mov pc, lr +.Lfunc_end12: + .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L + .cantunwind + .fnend + + .globl mcl_fp_shr1_1L + .align 2 + .type mcl_fp_shr1_1L,%function +mcl_fp_shr1_1L: @ @mcl_fp_shr1_1L + .fnstart +@ BB#0: + ldr r1, [r1] + lsr r1, r1, #1 + str r1, [r0] + mov pc, lr +.Lfunc_end13: + .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L + .cantunwind + .fnend + + .globl mcl_fp_add1L + .align 2 + .type mcl_fp_add1L,%function +mcl_fp_add1L: @ @mcl_fp_add1L + .fnstart +@ BB#0: + ldr r1, [r1] + ldr r2, [r2] + ldr r3, [r3] + adds r1, r2, r1 + mov r2, #0 + str r1, [r0] + adc r2, r2, #0 + subs r1, r1, r3 + sbc r2, r2, #0 + tst r2, #1 + streq r1, [r0] + mov pc, lr +.Lfunc_end14: + .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L + .cantunwind + .fnend + + .globl mcl_fp_addNF1L + .align 2 + .type mcl_fp_addNF1L,%function +mcl_fp_addNF1L: @ @mcl_fp_addNF1L + .fnstart +@ BB#0: + ldr r1, [r1] + ldr r2, [r2] + add r1, r2, r1 + ldr r2, [r3] + sub r2, r1, r2 + cmp r2, #0 + movlt r2, r1 + str r2, [r0] + mov pc, lr +.Lfunc_end15: + .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L + .cantunwind + .fnend + + .globl mcl_fp_sub1L + .align 2 + .type mcl_fp_sub1L,%function +mcl_fp_sub1L: @ @mcl_fp_sub1L + .fnstart +@ BB#0: + ldr r2, [r2] + ldr r1, [r1] + subs r1, r1, r2 + mov r2, #0 + sbc r2, r2, #0 + str r1, [r0] + tst r2, #1 + ldrne r2, [r3] + addne r1, r2, r1 + strne r1, [r0] + movne pc, lr + mov pc, lr +.Lfunc_end16: + .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L + .cantunwind + .fnend + + .globl mcl_fp_subNF1L + .align 2 + .type mcl_fp_subNF1L,%function +mcl_fp_subNF1L: @ @mcl_fp_subNF1L + .fnstart +@ BB#0: + ldr r2, [r2] + ldr r1, [r1] + sub r1, r1, r2 + ldr r2, [r3] + cmp r1, #0 + addlt r1, r1, r2 + str r1, [r0] + mov pc, lr +.Lfunc_end17: + .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L + .cantunwind + .fnend + + .globl mcl_fpDbl_add1L + .align 2 + .type mcl_fpDbl_add1L,%function +mcl_fpDbl_add1L: @ @mcl_fpDbl_add1L + .fnstart +@ BB#0: + .save {r11, lr} + push {r11, lr} + ldm r1, {r12, lr} + ldm r2, {r1, r2} + ldr r3, [r3] + adds r1, r1, r12 + str r1, [r0] + mov r1, #0 + adcs r2, r2, lr + adc r1, r1, #0 + subs r3, r2, r3 + sbc r1, r1, #0 + tst r1, #1 + movne r3, r2 + str r3, [r0, #4] + pop {r11, lr} + mov pc, lr +.Lfunc_end18: + .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub1L + .align 2 + .type mcl_fpDbl_sub1L,%function +mcl_fpDbl_sub1L: @ @mcl_fpDbl_sub1L + .fnstart +@ BB#0: + .save {r11, lr} + push {r11, lr} + ldm r2, {r12, lr} + ldr r2, [r1] + ldr r1, [r1, #4] + ldr r3, [r3] + subs r2, r2, r12 + str r2, [r0] + mov r2, #0 + sbcs r1, r1, lr + sbc r2, r2, #0 + tst r2, #1 + addne r1, r1, r3 + str r1, [r0, #4] + pop {r11, lr} + mov pc, lr +.Lfunc_end19: + .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre2L + .align 2 + .type mcl_fp_mulUnitPre2L,%function 
+mcl_fp_mulUnitPre2L: @ @mcl_fp_mulUnitPre2L + .fnstart +@ BB#0: + .save {r11, lr} + push {r11, lr} + ldm r1, {r3, lr} + umull r12, r1, r3, r2 + mov r3, #0 + umlal r1, r3, lr, r2 + str r12, [r0] + stmib r0, {r1, r3} + pop {r11, lr} + mov pc, lr +.Lfunc_end20: + .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre2L + .align 2 + .type mcl_fpDbl_mulPre2L,%function +mcl_fpDbl_mulPre2L: @ @mcl_fpDbl_mulPre2L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldr r3, [r2] + ldm r1, {r12, lr} + ldr r2, [r2, #4] + mov r5, #0 + umull r1, r4, r12, r3 + umlal r4, r5, lr, r3 + umull r3, r6, r12, r2 + str r1, [r0] + mov r1, #0 + adds r3, r3, r4 + str r3, [r0, #4] + umull r3, r4, lr, r2 + adcs r2, r3, r5 + adc r1, r1, #0 + adds r2, r2, r6 + adc r1, r1, r4 + str r2, [r0, #8] + str r1, [r0, #12] + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end21: + .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre2L + .align 2 + .type mcl_fpDbl_sqrPre2L,%function +mcl_fpDbl_sqrPre2L: @ @mcl_fpDbl_sqrPre2L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldr r2, [r1] + ldr r1, [r1, #4] + mov r4, #0 + mov lr, #0 + umull r12, r3, r2, r2 + umull r5, r6, r1, r2 + umlal r3, r4, r1, r2 + str r12, [r0] + adds r2, r3, r5 + umull r3, r5, r1, r1 + adcs r1, r4, r3 + str r2, [r0, #4] + adc r3, lr, #0 + adds r1, r1, r6 + adc r3, r3, r5 + str r1, [r0, #8] + str r3, [r0, #12] + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end22: + .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L + .cantunwind + .fnend + + .globl mcl_fp_mont2L + .align 2 + .type mcl_fp_mont2L,%function +mcl_fp_mont2L: @ @mcl_fp_mont2L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldm r1, {r12, lr} + ldm r2, {r1, r2} + mov r7, #0 + mov r5, #0 + mov r6, #0 + umull r8, r9, r2, r12 + umull r11, r4, r12, r1 + umlal r9, r7, r2, lr + umlal r4, r5, lr, r1 + ldmda r3, {r12, lr} + ldr r10, [r3, #4] + mul r1, r11, r12 + umull r3, r2, r1, lr + adds r3, r3, r11 + mov r3, #0 + umlal r2, r3, r1, r10 + adcs r1, r2, r4 + adcs r2, r3, r5 + adc r3, r6, #0 + adds r1, r1, r8 + adcs r8, r2, r9 + mul r5, r1, r12 + adcs r3, r3, r7 + umull r7, r2, r5, lr + adc r4, r6, #0 + umlal r2, r6, r5, r10 + adds r1, r7, r1 + adcs r1, r2, r8 + adcs r2, r6, r3 + adc r3, r4, #0 + subs r7, r1, lr + sbcs r6, r2, r10 + sbc r3, r3, #0 + ands r3, r3, #1 + movne r7, r1 + movne r6, r2 + str r7, [r0] + str r6, [r0, #4] + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end23: + .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L + .cantunwind + .fnend + + .globl mcl_fp_montNF2L + .align 2 + .type mcl_fp_montNF2L,%function +mcl_fp_montNF2L: @ @mcl_fp_montNF2L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldm r2, {r12, lr} + ldr r11, [r1] + ldr r8, [r3, #-4] + ldr r7, [r3] + ldr r9, [r1, #4] + ldr r3, [r3, #4] + umull r4, r5, r11, r12 + mul r6, r4, r8 + umull r1, r10, r6, r7 + adds r1, r1, r4 + mov r4, #0 + umlal r5, r4, r9, r12 + umull r2, r12, r6, r3 + mov r1, #0 + adcs r2, r2, r5 + adc r4, r4, #0 + adds r2, r2, r10 + adc r6, r4, r12 + umull r5, r4, lr, r11 + adds r2, r5, r2 + umlal r4, r1, lr, r9 + adcs r9, r4, r6 + mul r5, r2, r8 + adc lr, r1, #0 + umull r1, r6, r5, r7 + umull r4, r12, r5, r3 + adds r1, r1, r2 + adcs r1, r4, r9 + adc r2, lr, #0 + adds r1, r1, r6 + adc r2, r2, r12 + subs r7, r1, r7 + sbc r3, 
r2, r3 + cmp r3, #0 + movlt r7, r1 + movlt r3, r2 + str r7, [r0] + str r3, [r0, #4] + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end24: + .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L + .cantunwind + .fnend + + .globl mcl_fp_montRed2L + .align 2 + .type mcl_fp_montRed2L,%function +mcl_fp_montRed2L: @ @mcl_fp_montRed2L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, lr} + push {r4, r5, r6, r7, r8, r9, r10, lr} + ldr r12, [r2, #-4] + ldm r2, {r3, lr} + ldm r1, {r2, r9, r10} + ldr r8, [r1, #12] + mov r5, #0 + mov r7, #0 + mul r6, r2, r12 + umull r1, r4, r6, r3 + umlal r4, r5, r6, lr + adds r1, r2, r1 + adcs r1, r9, r4 + adcs r9, r10, r5 + mul r6, r1, r12 + adcs r8, r8, #0 + umull r2, r4, r6, r3 + adc r5, r7, #0 + umlal r4, r7, r6, lr + adds r1, r2, r1 + adcs r1, r4, r9 + adcs r2, r7, r8 + adc r7, r5, #0 + subs r3, r1, r3 + sbcs r6, r2, lr + sbc r7, r7, #0 + ands r7, r7, #1 + movne r3, r1 + movne r6, r2 + stm r0, {r3, r6} + pop {r4, r5, r6, r7, r8, r9, r10, lr} + mov pc, lr +.Lfunc_end25: + .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L + .cantunwind + .fnend + + .globl mcl_fp_addPre2L + .align 2 + .type mcl_fp_addPre2L,%function +mcl_fp_addPre2L: @ @mcl_fp_addPre2L + .fnstart +@ BB#0: + ldm r1, {r3, r12} + ldm r2, {r1, r2} + adds r1, r1, r3 + adcs r2, r2, r12 + stm r0, {r1, r2} + mov r0, #0 + adc r0, r0, #0 + mov pc, lr +.Lfunc_end26: + .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L + .cantunwind + .fnend + + .globl mcl_fp_subPre2L + .align 2 + .type mcl_fp_subPre2L,%function +mcl_fp_subPre2L: @ @mcl_fp_subPre2L + .fnstart +@ BB#0: + ldm r2, {r3, r12} + ldr r2, [r1] + ldr r1, [r1, #4] + subs r2, r2, r3 + sbcs r1, r1, r12 + str r2, [r0] + str r1, [r0, #4] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + mov pc, lr +.Lfunc_end27: + .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L + .cantunwind + .fnend + + .globl mcl_fp_shr1_2L + .align 2 + .type mcl_fp_shr1_2L,%function +mcl_fp_shr1_2L: @ @mcl_fp_shr1_2L + .fnstart +@ BB#0: + ldr r2, [r1] + ldr r1, [r1, #4] + lsrs r3, r1, #1 + lsr r1, r1, #1 + rrx r2, r2 + str r2, [r0] + str r1, [r0, #4] + mov pc, lr +.Lfunc_end28: + .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L + .cantunwind + .fnend + + .globl mcl_fp_add2L + .align 2 + .type mcl_fp_add2L,%function +mcl_fp_add2L: @ @mcl_fp_add2L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r1, {r12, lr} + ldm r2, {r1, r2} + adds r12, r1, r12 + mov r1, #0 + adcs r2, r2, lr + str r12, [r0] + str r2, [r0, #4] + adc lr, r1, #0 + ldm r3, {r1, r4} + subs r3, r12, r1 + sbcs r2, r2, r4 + sbc r1, lr, #0 + tst r1, #1 + streq r3, [r0] + streq r2, [r0, #4] + pop {r4, lr} + mov pc, lr +.Lfunc_end29: + .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L + .cantunwind + .fnend + + .globl mcl_fp_addNF2L + .align 2 + .type mcl_fp_addNF2L,%function +mcl_fp_addNF2L: @ @mcl_fp_addNF2L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r1, {r12, lr} + ldm r2, {r1, r2} + adds r1, r1, r12 + adc r4, r2, lr + ldm r3, {r12, lr} + subs r3, r1, r12 + sbc r2, r4, lr + cmp r2, #0 + movlt r3, r1 + movlt r2, r4 + str r3, [r0] + str r2, [r0, #4] + pop {r4, lr} + mov pc, lr +.Lfunc_end30: + .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L + .cantunwind + .fnend + + .globl mcl_fp_sub2L + .align 2 + .type mcl_fp_sub2L,%function +mcl_fp_sub2L: @ @mcl_fp_sub2L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r2, {r12, lr} + ldm r1, {r2, r4} + subs r1, r2, r12 + sbcs r2, r4, lr + mov r4, #0 + sbc r4, r4, #0 + stm r0, {r1, r2} + tst r4, #1 + popeq {r4, lr} + moveq 
pc, lr + ldr r4, [r3] + ldr r3, [r3, #4] + adds r1, r4, r1 + adc r2, r3, r2 + stm r0, {r1, r2} + pop {r4, lr} + mov pc, lr +.Lfunc_end31: + .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L + .cantunwind + .fnend + + .globl mcl_fp_subNF2L + .align 2 + .type mcl_fp_subNF2L,%function +mcl_fp_subNF2L: @ @mcl_fp_subNF2L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r2, {r12, lr} + ldr r2, [r1] + ldr r1, [r1, #4] + subs r4, r2, r12 + sbc r1, r1, lr + ldm r3, {r12, lr} + adds r3, r4, r12 + adc r2, r1, lr + cmp r1, #0 + movge r3, r4 + movge r2, r1 + str r3, [r0] + str r2, [r0, #4] + pop {r4, lr} + mov pc, lr +.Lfunc_end32: + .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L + .cantunwind + .fnend + + .globl mcl_fpDbl_add2L + .align 2 + .type mcl_fpDbl_add2L,%function +mcl_fpDbl_add2L: @ @mcl_fpDbl_add2L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldm r1, {r12, lr} + ldr r4, [r1, #8] + ldr r1, [r1, #12] + ldm r2, {r5, r6, r7} + ldr r2, [r2, #12] + adds r5, r5, r12 + adcs r6, r6, lr + str r5, [r0] + adcs r7, r7, r4 + str r6, [r0, #4] + mov r6, #0 + adcs r1, r2, r1 + adc r2, r6, #0 + ldr r6, [r3] + ldr r3, [r3, #4] + subs r6, r7, r6 + sbcs r3, r1, r3 + sbc r2, r2, #0 + ands r2, r2, #1 + movne r6, r7 + movne r3, r1 + str r6, [r0, #8] + str r3, [r0, #12] + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end33: + .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub2L + .align 2 + .type mcl_fpDbl_sub2L,%function +mcl_fpDbl_sub2L: @ @mcl_fpDbl_sub2L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldm r2, {r12, lr} + ldr r4, [r2, #8] + ldr r2, [r2, #12] + ldm r1, {r5, r6, r7} + ldr r1, [r1, #12] + subs r5, r5, r12 + sbcs r6, r6, lr + str r5, [r0] + sbcs r7, r7, r4 + str r6, [r0, #4] + mov r6, #0 + sbcs r1, r1, r2 + sbc r2, r6, #0 + ldr r6, [r3] + ldr r3, [r3, #4] + adds r6, r7, r6 + adc r3, r1, r3 + ands r2, r2, #1 + moveq r6, r7 + moveq r3, r1 + str r6, [r0, #8] + str r3, [r0, #12] + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end34: + .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre3L + .align 2 + .type mcl_fp_mulUnitPre3L,%function +mcl_fp_mulUnitPre3L: @ @mcl_fp_mulUnitPre3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, lr} + push {r4, r5, r6, r7, r8, lr} + ldr r12, [r1] + ldmib r1, {r3, r5} + umull lr, r4, r12, r2 + umull r1, r12, r5, r2 + umull r7, r8, r3, r2 + mov r5, r1 + mov r6, r4 + str lr, [r0] + umlal r6, r5, r3, r2 + adds r2, r4, r7 + adcs r1, r8, r1 + str r6, [r0, #4] + str r5, [r0, #8] + adc r1, r12, #0 + str r1, [r0, #12] + pop {r4, r5, r6, r7, r8, lr} + mov pc, lr +.Lfunc_end35: + .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre3L + .align 2 + .type mcl_fpDbl_mulPre3L,%function +mcl_fpDbl_mulPre3L: @ @mcl_fpDbl_mulPre3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldr r3, [r2] + ldm r1, {r12, lr} + ldr r1, [r1, #8] + umull r4, r5, r12, r3 + str r4, [r0] + umull r4, r6, lr, r3 + adds r4, r5, r4 + umull r7, r4, r1, r3 + adcs r6, r6, r7 + umlal r5, r7, lr, r3 + ldr r3, [r2, #4] + ldr r2, [r2, #8] + adc r8, r4, #0 + umull r6, r10, r12, r3 + adds r9, r6, r5 + umull r6, r5, lr, r3 + adcs r6, r6, r7 + umull r7, r4, r1, r3 + str r9, [r0, #4] + adcs r3, r7, r8 + mov r8, #0 + adc r7, r8, #0 + adds r6, r6, r10 + adcs r11, r3, r5 + umull r5, r9, r1, r2 + umull r1, 
r10, lr, r2 + adc r4, r7, r4 + umull r7, r3, r12, r2 + adds r2, r6, r7 + adcs r1, r11, r1 + str r2, [r0, #8] + adcs r2, r4, r5 + adc r7, r8, #0 + adds r1, r1, r3 + str r1, [r0, #12] + adcs r1, r2, r10 + str r1, [r0, #16] + adc r1, r7, r9 + str r1, [r0, #20] + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end36: + .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre3L + .align 2 + .type mcl_fpDbl_sqrPre3L,%function +mcl_fpDbl_sqrPre3L: @ @mcl_fpDbl_sqrPre3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, lr} + push {r4, r5, r6, r7, r8, r9, r10, lr} + ldm r1, {r2, r3, r12} + mov r10, #0 + umull r1, lr, r2, r2 + umull r7, r4, r3, r2 + str r1, [r0] + umull r1, r8, r12, r2 + mov r5, lr + mov r6, r1 + umlal r5, r6, r3, r2 + adds r2, lr, r7 + adcs r2, r4, r1 + adc r2, r8, #0 + adds lr, r5, r7 + umull r5, r9, r3, r3 + adcs r5, r6, r5 + umull r6, r7, r12, r3 + str lr, [r0, #4] + adcs r2, r2, r6 + adc r3, r10, #0 + adds r4, r5, r4 + adcs r2, r2, r9 + adc r3, r3, r7 + adds r1, r4, r1 + umull r5, r4, r12, r12 + str r1, [r0, #8] + adcs r1, r2, r6 + adcs r2, r3, r5 + adc r3, r10, #0 + adds r1, r1, r8 + str r1, [r0, #12] + adcs r1, r2, r7 + str r1, [r0, #16] + adc r1, r3, r4 + str r1, [r0, #20] + pop {r4, r5, r6, r7, r8, r9, r10, lr} + mov pc, lr +.Lfunc_end37: + .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L + .cantunwind + .fnend + + .globl mcl_fp_mont3L + .align 2 + .type mcl_fp_mont3L,%function +mcl_fp_mont3L: @ @mcl_fp_mont3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #44 + sub sp, sp, #44 + str r0, [sp, #24] @ 4-byte Spill + ldm r2, {r8, lr} + ldr r0, [r2, #8] + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [r1] + str r0, [sp, #36] @ 4-byte Spill + ldmib r1, {r4, r9} + ldr r2, [r3, #-4] + umull r7, r6, r0, r8 + ldr r0, [r3] + ldr r1, [r3, #8] + ldr r10, [r3, #4] + str r7, [sp, #12] @ 4-byte Spill + mul r5, r7, r2 + str r2, [sp, #16] @ 4-byte Spill + str r9, [sp, #32] @ 4-byte Spill + str r0, [sp, #40] @ 4-byte Spill + str r1, [sp, #28] @ 4-byte Spill + umull r12, r2, r5, r1 + umull r1, r3, r5, r0 + umull r0, r7, r9, r8 + umull r11, r9, r4, r8 + str r7, [sp] @ 4-byte Spill + adds r7, r6, r11 + str r1, [sp, #8] @ 4-byte Spill + mov r1, r3 + str r2, [sp, #4] @ 4-byte Spill + mov r2, r12 + adcs r7, r9, r0 + umlal r1, r2, r5, r10 + umlal r6, r0, r4, r8 + mov r8, #0 + ldr r7, [sp] @ 4-byte Reload + adc r9, r7, #0 + umull r7, r11, r5, r10 + ldr r5, [sp, #8] @ 4-byte Reload + adds r3, r3, r7 + ldr r7, [sp, #12] @ 4-byte Reload + adcs r3, r11, r12 + ldr r3, [sp, #4] @ 4-byte Reload + adc r3, r3, #0 + adds r7, r5, r7 + adcs r11, r1, r6 + adcs r12, r2, r0 + ldr r0, [sp, #32] @ 4-byte Reload + adcs r9, r3, r9 + ldr r3, [sp, #36] @ 4-byte Reload + adc r8, r8, #0 + umull r6, r7, lr, r0 + umull r5, r0, lr, r4 + umull r1, r2, lr, r3 + adds r5, r2, r5 + adcs r0, r0, r6 + umlal r2, r6, lr, r4 + adc r0, r7, #0 + adds r1, r11, r1 + ldr r11, [sp, #16] @ 4-byte Reload + adcs r2, r12, r2 + ldr r12, [sp, #28] @ 4-byte Reload + str r2, [sp, #12] @ 4-byte Spill + adcs r2, r9, r6 + str r2, [sp, #8] @ 4-byte Spill + ldr r2, [sp, #40] @ 4-byte Reload + adcs r0, r8, r0 + str r0, [sp, #4] @ 4-byte Spill + mov r0, #0 + mul r6, r1, r11 + adc r0, r0, #0 + umull r7, r9, r6, r12 + str r0, [sp] @ 4-byte Spill + mov r5, r7 + umull r8, r0, r6, r2 + umull lr, r2, r6, r10 + mov r3, r0 + adds r0, r0, lr + ldr lr, [sp, #36] @ 4-byte Reload + adcs r0, r2, r7 + umlal r3, r5, 
r6, r10 + adc r0, r9, #0 + adds r1, r8, r1 + ldr r1, [sp, #12] @ 4-byte Reload + adcs r1, r3, r1 + ldr r3, [sp, #20] @ 4-byte Reload + str r1, [sp, #12] @ 4-byte Spill + ldr r1, [sp, #8] @ 4-byte Reload + adcs r8, r5, r1 + ldr r1, [sp, #4] @ 4-byte Reload + adcs r9, r0, r1 + ldr r0, [sp] @ 4-byte Reload + umull r1, r2, r3, lr + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #32] @ 4-byte Reload + umull r6, r7, r3, r0 + umull r5, r0, r3, r4 + adds r5, r2, r5 + adcs r0, r0, r6 + umlal r2, r6, r3, r4 + ldr r3, [sp, #12] @ 4-byte Reload + adc r0, r7, #0 + adds r1, r3, r1 + adcs r2, r8, r2 + str r2, [sp, #36] @ 4-byte Spill + ldr r2, [sp, #8] @ 4-byte Reload + adcs r9, r9, r6 + mul r6, r1, r11 + umull r7, r4, r6, r12 + ldr r12, [sp, #40] @ 4-byte Reload + mov r5, r7 + adcs r0, r2, r0 + str r0, [sp, #32] @ 4-byte Spill + mov r0, #0 + umull r11, r3, r6, r12 + adc r8, r0, #0 + umull r0, lr, r6, r10 + mov r2, r3 + adds r0, r3, r0 + ldr r3, [sp, #32] @ 4-byte Reload + umlal r2, r5, r6, r10 + adcs r0, lr, r7 + adc r0, r4, #0 + adds r1, r11, r1 + ldr r1, [sp, #36] @ 4-byte Reload + adcs r1, r2, r1 + adcs r2, r5, r9 + ldr r5, [sp, #28] @ 4-byte Reload + adcs r0, r0, r3 + adc r3, r8, #0 + subs r7, r1, r12 + sbcs r6, r2, r10 + sbcs r5, r0, r5 + sbc r3, r3, #0 + ands r3, r3, #1 + movne r5, r0 + ldr r0, [sp, #24] @ 4-byte Reload + movne r7, r1 + movne r6, r2 + str r7, [r0] + str r6, [r0, #4] + str r5, [r0, #8] + add sp, sp, #44 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end38: + .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L + .cantunwind + .fnend + + .globl mcl_fp_montNF3L + .align 2 + .type mcl_fp_montNF3L,%function +mcl_fp_montNF3L: @ @mcl_fp_montNF3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #68 + sub sp, sp, #68 + str r0, [sp, #64] @ 4-byte Spill + ldr r8, [r1] + ldmib r1, {r6, r9} + ldm r2, {r4, r7} + ldr r0, [r2, #8] + mov r10, r3 + umull r3, r1, r0, r9 + str r1, [sp, #52] @ 4-byte Spill + umull r1, r2, r0, r8 + str r3, [sp, #44] @ 4-byte Spill + str r1, [sp, #48] @ 4-byte Spill + str r2, [sp, #40] @ 4-byte Spill + mov r1, r2 + mov r2, r3 + umull r3, r5, r0, r6 + umlal r1, r2, r0, r6 + str r3, [sp, #32] @ 4-byte Spill + umull r3, r0, r7, r6 + str r5, [sp, #36] @ 4-byte Spill + str r1, [sp, #56] @ 4-byte Spill + str r2, [sp, #60] @ 4-byte Spill + umull r2, r1, r7, r9 + str r0, [sp, #8] @ 4-byte Spill + str r3, [sp, #4] @ 4-byte Spill + str r1, [sp, #28] @ 4-byte Spill + umull r1, r11, r7, r8 + str r2, [sp, #16] @ 4-byte Spill + str r1, [sp, #24] @ 4-byte Spill + mov r1, r2 + str r11, [sp, #12] @ 4-byte Spill + umlal r11, r1, r7, r6 + umull r0, r7, r6, r4 + str r1, [sp, #20] @ 4-byte Spill + umull lr, r1, r9, r4 + umull r9, r2, r8, r4 + ldr r8, [r10, #-4] + adds r0, r2, r0 + str r1, [sp] @ 4-byte Spill + mov r1, r2 + mov r12, lr + adcs r0, r7, lr + umlal r1, r12, r6, r4 + ldr r0, [sp] @ 4-byte Reload + ldm r10, {r6, r7} + mul r2, r9, r8 + adc r3, r0, #0 + ldr r0, [r10, #8] + umull r4, lr, r2, r6 + adds r4, r4, r9 + umull r4, r9, r2, r7 + adcs r1, r4, r1 + umull r4, r5, r2, r0 + adcs r2, r4, r12 + ldr r4, [sp, #4] @ 4-byte Reload + adc r3, r3, #0 + adds r1, r1, lr + adcs r2, r2, r9 + adc r3, r3, r5 + ldr r5, [sp, #12] @ 4-byte Reload + adds r5, r5, r4 + ldr r4, [sp, #8] @ 4-byte Reload + ldr r5, [sp, #16] @ 4-byte Reload + adcs r5, r4, r5 + ldr r4, [sp, #24] @ 4-byte Reload + ldr r5, [sp, #28] @ 4-byte Reload + adc r5, r5, #0 + adds r1, r4, r1 + ldr r4, [sp, #20] @ 4-byte Reload + adcs 
r2, r11, r2 + adcs r12, r4, r3 + mul r4, r1, r8 + umull r3, r9, r4, r6 + adc lr, r5, #0 + adds r1, r3, r1 + umull r1, r3, r4, r7 + adcs r1, r1, r2 + umull r2, r5, r4, r0 + adcs r2, r2, r12 + adc r4, lr, #0 + adds r1, r1, r9 + adcs r12, r2, r3 + ldr r2, [sp, #40] @ 4-byte Reload + ldr r3, [sp, #32] @ 4-byte Reload + adc r9, r4, r5 + adds r5, r2, r3 + ldr r2, [sp, #44] @ 4-byte Reload + ldr r3, [sp, #36] @ 4-byte Reload + adcs r5, r3, r2 + ldr r2, [sp, #52] @ 4-byte Reload + ldr r5, [sp, #60] @ 4-byte Reload + adc lr, r2, #0 + ldr r2, [sp, #48] @ 4-byte Reload + adds r1, r2, r1 + mul r4, r1, r8 + umull r10, r2, r4, r0 + umull r3, r8, r4, r7 + str r2, [sp, #52] @ 4-byte Spill + umull r2, r11, r4, r6 + ldr r4, [sp, #56] @ 4-byte Reload + adcs r4, r4, r12 + adcs r12, r5, r9 + adc r5, lr, #0 + adds r1, r2, r1 + adcs r1, r3, r4 + adcs r2, r10, r12 + adc r3, r5, #0 + ldr r5, [sp, #52] @ 4-byte Reload + adds r1, r1, r11 + adcs r2, r2, r8 + adc r3, r3, r5 + subs r6, r1, r6 + sbcs r7, r2, r7 + sbc r0, r3, r0 + asr r5, r0, #31 + cmp r5, #0 + movlt r6, r1 + ldr r1, [sp, #64] @ 4-byte Reload + movlt r7, r2 + movlt r0, r3 + stm r1, {r6, r7} + str r0, [r1, #8] + add sp, sp, #68 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end39: + .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L + .cantunwind + .fnend + + .globl mcl_fp_montRed3L + .align 2 + .type mcl_fp_montRed3L,%function +mcl_fp_montRed3L: @ @mcl_fp_montRed3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #28 + sub sp, sp, #28 + ldr r5, [r2] + ldr lr, [r2, #-4] + ldr r3, [r2, #4] + ldr r2, [r2, #8] + str r0, [sp, #24] @ 4-byte Spill + str r5, [sp, #20] @ 4-byte Spill + str r2, [sp] @ 4-byte Spill + ldm r1, {r4, r7} + str r7, [sp, #8] @ 4-byte Spill + ldr r7, [r1, #8] + mul r6, r4, lr + umull r10, r8, r6, r3 + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [r1, #12] + str r7, [sp, #12] @ 4-byte Spill + umull r7, r9, r6, r2 + umull r11, r2, r6, r5 + mov r0, r2 + adds r2, r2, r10 + mov r12, r7 + adcs r2, r8, r7 + umlal r0, r12, r6, r3 + ldr r8, [r1, #20] + ldr r1, [r1, #16] + ldr r2, [sp, #8] @ 4-byte Reload + adc r10, r9, #0 + adds r7, r4, r11 + mov r11, lr + adcs r9, r2, r0 + ldr r2, [sp] @ 4-byte Reload + mul r7, r9, lr + umull lr, r0, r7, r2 + str r0, [sp, #8] @ 4-byte Spill + umull r4, r0, r7, r5 + ldr r5, [sp, #16] @ 4-byte Reload + mov r6, lr + str r4, [sp, #4] @ 4-byte Spill + mov r4, r0 + umlal r4, r6, r7, r3 + adcs r12, r5, r12 + ldr r5, [sp, #12] @ 4-byte Reload + adcs r10, r5, r10 + adcs r1, r1, #0 + str r1, [sp, #16] @ 4-byte Spill + adcs r1, r8, #0 + str r1, [sp, #12] @ 4-byte Spill + mov r1, #0 + adc r8, r1, #0 + umull r1, r5, r7, r3 + ldr r7, [sp, #16] @ 4-byte Reload + adds r1, r0, r1 + adcs r0, r5, lr + ldr r1, [sp, #4] @ 4-byte Reload + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + adds r1, r1, r9 + adcs r1, r4, r12 + adcs lr, r6, r10 + ldr r6, [sp, #20] @ 4-byte Reload + mul r5, r1, r11 + mov r11, r2 + adcs r0, r0, r7 + umull r4, r12, r5, r2 + umull r2, r7, r5, r3 + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r10, r0, #0 + umull r9, r0, r5, r6 + adc r8, r8, #0 + adds r2, r0, r2 + mov r2, r4 + adcs r4, r7, r4 + adc r7, r12, #0 + adds r1, r9, r1 + umlal r0, r2, r5, r3 + ldr r1, [sp, #16] @ 4-byte Reload + adcs r0, r0, lr + adcs r1, r2, r1 + adcs r2, r7, r10 + adc r7, r8, #0 + subs r6, r0, r6 + sbcs r3, r1, r3 + sbcs r5, r2, r11 + sbc r7, r7, #0 + ands r7, r7, #1 + movne r6, r0 + ldr r0, [sp, #24] @ 4-byte 
Reload + movne r3, r1 + movne r5, r2 + str r6, [r0] + stmib r0, {r3, r5} + add sp, sp, #28 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end40: + .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L + .cantunwind + .fnend + + .globl mcl_fp_addPre3L + .align 2 + .type mcl_fp_addPre3L,%function +mcl_fp_addPre3L: @ @mcl_fp_addPre3L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r1, {r3, r12, lr} + ldm r2, {r1, r4} + ldr r2, [r2, #8] + adds r1, r1, r3 + adcs r3, r4, r12 + adcs r2, r2, lr + stm r0, {r1, r3} + str r2, [r0, #8] + mov r0, #0 + adc r0, r0, #0 + pop {r4, lr} + mov pc, lr +.Lfunc_end41: + .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L + .cantunwind + .fnend + + .globl mcl_fp_subPre3L + .align 2 + .type mcl_fp_subPre3L,%function +mcl_fp_subPre3L: @ @mcl_fp_subPre3L + .fnstart +@ BB#0: + .save {r4, lr} + push {r4, lr} + ldm r2, {r3, r12, lr} + ldm r1, {r2, r4} + ldr r1, [r1, #8] + subs r2, r2, r3 + sbcs r3, r4, r12 + sbcs r1, r1, lr + stm r0, {r2, r3} + str r1, [r0, #8] + mov r0, #0 + sbc r0, r0, #0 + and r0, r0, #1 + pop {r4, lr} + mov pc, lr +.Lfunc_end42: + .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L + .cantunwind + .fnend + + .globl mcl_fp_shr1_3L + .align 2 + .type mcl_fp_shr1_3L,%function +mcl_fp_shr1_3L: @ @mcl_fp_shr1_3L + .fnstart +@ BB#0: + ldr r3, [r1, #4] + ldr r12, [r1] + ldr r1, [r1, #8] + lsrs r2, r3, #1 + lsr r3, r3, #1 + orr r3, r3, r1, lsl #31 + rrx r2, r12 + lsr r1, r1, #1 + stm r0, {r2, r3} + str r1, [r0, #8] + mov pc, lr +.Lfunc_end43: + .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L + .cantunwind + .fnend + + .globl mcl_fp_add3L + .align 2 + .type mcl_fp_add3L,%function +mcl_fp_add3L: @ @mcl_fp_add3L + .fnstart +@ BB#0: + .save {r4, r5, r11, lr} + push {r4, r5, r11, lr} + ldm r1, {r12, lr} + ldr r1, [r1, #8] + ldm r2, {r4, r5} + ldr r2, [r2, #8] + adds r4, r4, r12 + adcs r5, r5, lr + adcs r1, r2, r1 + stm r0, {r4, r5} + mov r2, #0 + str r1, [r0, #8] + adc r12, r2, #0 + ldm r3, {r2, lr} + ldr r3, [r3, #8] + subs r4, r4, r2 + sbcs r5, r5, lr + sbcs r3, r1, r3 + sbc r1, r12, #0 + tst r1, #1 + stmeq r0, {r4, r5} + streq r3, [r0, #8] + pop {r4, r5, r11, lr} + mov pc, lr +.Lfunc_end44: + .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L + .cantunwind + .fnend + + .globl mcl_fp_addNF3L + .align 2 + .type mcl_fp_addNF3L,%function +mcl_fp_addNF3L: @ @mcl_fp_addNF3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldm r1, {r12, lr} + ldr r1, [r1, #8] + ldm r2, {r4, r5} + ldr r2, [r2, #8] + adds r4, r4, r12 + adcs r5, r5, lr + adc r7, r2, r1 + ldm r3, {r2, r12, lr} + subs r2, r4, r2 + sbcs r3, r5, r12 + sbc r1, r7, lr + asr r6, r1, #31 + cmp r6, #0 + movlt r2, r4 + movlt r3, r5 + movlt r1, r7 + stm r0, {r2, r3} + str r1, [r0, #8] + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end45: + .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L + .cantunwind + .fnend + + .globl mcl_fp_sub3L + .align 2 + .type mcl_fp_sub3L,%function +mcl_fp_sub3L: @ @mcl_fp_sub3L + .fnstart +@ BB#0: + .save {r4, r5, r6, lr} + push {r4, r5, r6, lr} + ldm r2, {r12, lr} + ldr r4, [r2, #8] + ldm r1, {r2, r5, r6} + subs r1, r2, r12 + sbcs r2, r5, lr + sbcs r12, r6, r4 + mov r6, #0 + sbc r6, r6, #0 + stm r0, {r1, r2, r12} + tst r6, #1 + popeq {r4, r5, r6, lr} + moveq pc, lr + ldr r6, [r3] + ldr r5, [r3, #4] + ldr r3, [r3, #8] + adds r1, r6, r1 + adcs r2, r5, r2 + adc r3, r3, r12 + stm r0, {r1, r2, r3} + pop {r4, r5, r6, lr} + mov pc, lr +.Lfunc_end46: + .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L + .cantunwind + 
.fnend + + .globl mcl_fp_subNF3L + .align 2 + .type mcl_fp_subNF3L,%function +mcl_fp_subNF3L: @ @mcl_fp_subNF3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldm r2, {r12, lr} + ldr r2, [r2, #8] + ldm r1, {r4, r5} + ldr r1, [r1, #8] + subs r4, r4, r12 + sbcs r7, r5, lr + sbc r1, r1, r2 + ldm r3, {r2, r12, lr} + asr r6, r1, #31 + adds r2, r4, r2 + adcs r3, r7, r12 + adc r5, r1, lr + cmp r6, #0 + movge r2, r4 + movge r3, r7 + movge r5, r1 + stm r0, {r2, r3, r5} + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end47: + .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L + .cantunwind + .fnend + + .globl mcl_fpDbl_add3L + .align 2 + .type mcl_fpDbl_add3L,%function +mcl_fpDbl_add3L: @ @mcl_fpDbl_add3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldm r1, {r12, lr} + ldr r7, [r2] + ldr r11, [r1, #8] + ldr r9, [r1, #12] + ldr r10, [r1, #16] + ldr r8, [r1, #20] + ldmib r2, {r1, r5, r6} + ldr r4, [r2, #16] + ldr r2, [r2, #20] + adds r7, r7, r12 + adcs r1, r1, lr + str r7, [r0] + str r1, [r0, #4] + adcs r1, r5, r11 + ldr r5, [r3] + adcs r7, r6, r9 + str r1, [r0, #8] + mov r1, #0 + adcs r6, r4, r10 + ldr r4, [r3, #4] + ldr r3, [r3, #8] + adcs r2, r2, r8 + adc r1, r1, #0 + subs r5, r7, r5 + sbcs r4, r6, r4 + sbcs r3, r2, r3 + sbc r1, r1, #0 + ands r1, r1, #1 + movne r5, r7 + movne r4, r6 + movne r3, r2 + str r5, [r0, #12] + str r4, [r0, #16] + str r3, [r0, #20] + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end48: + .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L + .cantunwind + .fnend + + .globl mcl_fpDbl_sub3L + .align 2 + .type mcl_fpDbl_sub3L,%function +mcl_fpDbl_sub3L: @ @mcl_fpDbl_sub3L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ldm r2, {r12, lr} + ldr r7, [r1] + ldr r11, [r2, #8] + ldr r9, [r2, #12] + ldr r10, [r2, #16] + ldr r8, [r2, #20] + ldmib r1, {r2, r5, r6} + ldr r4, [r1, #16] + ldr r1, [r1, #20] + subs r7, r7, r12 + sbcs r2, r2, lr + str r7, [r0] + str r2, [r0, #4] + sbcs r2, r5, r11 + ldr r5, [r3] + sbcs r7, r6, r9 + str r2, [r0, #8] + mov r2, #0 + sbcs r6, r4, r10 + ldr r4, [r3, #4] + ldr r3, [r3, #8] + sbcs r1, r1, r8 + sbc r2, r2, #0 + adds r5, r7, r5 + adcs r4, r6, r4 + adc r3, r1, r3 + ands r2, r2, #1 + moveq r5, r7 + moveq r4, r6 + moveq r3, r1 + str r5, [r0, #12] + str r4, [r0, #16] + str r3, [r0, #20] + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end49: + .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L + .cantunwind + .fnend + + .globl mcl_fp_mulUnitPre4L + .align 2 + .type mcl_fp_mulUnitPre4L,%function +mcl_fp_mulUnitPre4L: @ @mcl_fp_mulUnitPre4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r11, lr} + push {r4, r5, r6, r7, r11, lr} + ldr r12, [r1] + ldmib r1, {r3, lr} + ldr r1, [r1, #12] + umull r4, r6, r12, r2 + umull r7, r12, lr, r2 + str r4, [r0] + mov r5, r6 + mov r4, r7 + umlal r5, r4, r3, r2 + str r5, [r0, #4] + str r4, [r0, #8] + umull r5, lr, r1, r2 + umull r1, r4, r3, r2 + adds r1, r6, r1 + adcs r1, r4, r7 + adcs r1, r12, r5 + str r1, [r0, #12] + adc r1, lr, #0 + str r1, [r0, #16] + pop {r4, r5, r6, r7, r11, lr} + mov pc, lr +.Lfunc_end50: + .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L + .cantunwind + .fnend + + .globl mcl_fpDbl_mulPre4L + .align 2 + .type mcl_fpDbl_mulPre4L,%function +mcl_fpDbl_mulPre4L: @ @mcl_fpDbl_mulPre4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, 
r10, r11, lr} + .pad #40 + sub sp, sp, #40 + mov lr, r2 + ldr r11, [r1] + ldr r4, [lr] + ldmib r1, {r8, r12} + ldr r3, [r1, #12] + umull r2, r7, r11, r4 + umull r6, r9, r8, r4 + str r12, [sp] @ 4-byte Spill + adds r6, r7, r6 + str r2, [sp, #36] @ 4-byte Spill + mov r2, r3 + umull r6, r10, r12, r4 + adcs r5, r9, r6 + umlal r7, r6, r8, r4 + umull r5, r9, r3, r4 + ldr r3, [sp, #36] @ 4-byte Reload + ldr r4, [lr, #4] + adcs r10, r10, r5 + str r3, [r0] + adc r3, r9, #0 + str r3, [sp, #24] @ 4-byte Spill + umull r5, r3, r11, r4 + adds r7, r5, r7 + str r3, [sp, #32] @ 4-byte Spill + str r7, [sp, #36] @ 4-byte Spill + umull r7, r3, r8, r4 + str r3, [sp, #28] @ 4-byte Spill + adcs r3, r7, r6 + umull r7, r9, r12, r4 + mov r12, r2 + ldr r6, [sp, #32] @ 4-byte Reload + adcs r7, r7, r10 + umull r5, r10, r2, r4 + ldr r2, [sp, #24] @ 4-byte Reload + mov r4, #0 + adcs r5, r5, r2 + ldr r2, [sp, #28] @ 4-byte Reload + adc r4, r4, #0 + adds r6, r3, r6 + adcs r7, r7, r2 + ldr r2, [lr, #12] + str r7, [sp, #24] @ 4-byte Spill + adcs r7, r5, r9 + str r7, [sp, #20] @ 4-byte Spill + adc r7, r4, r10 + ldr r4, [lr, #8] + str r7, [sp, #16] @ 4-byte Spill + ldr r7, [sp, #36] @ 4-byte Reload + str r7, [r0, #4] + umull r5, r7, r11, r4 + adds r5, r5, r6 + str r7, [sp, #12] @ 4-byte Spill + str r5, [r0, #8] + ldm r1, {r11, lr} + ldr r5, [r1, #8] + ldr r1, [r1, #12] + ldr r3, [sp, #24] @ 4-byte Reload + umull r6, r7, r1, r2 + umull r10, r1, r5, r2 + str r1, [sp, #32] @ 4-byte Spill + umull r5, r1, lr, r2 + str r6, [sp, #8] @ 4-byte Spill + str r7, [sp, #36] @ 4-byte Spill + ldr r7, [sp, #16] @ 4-byte Reload + str r1, [sp, #28] @ 4-byte Spill + umull r6, r1, r11, r2 + umull r2, r11, r12, r4 + str r1, [sp, #4] @ 4-byte Spill + ldr r1, [sp] @ 4-byte Reload + umull lr, r12, r1, r4 + umull r9, r1, r8, r4 + ldr r4, [sp, #20] @ 4-byte Reload + mov r8, #0 + adcs r3, r9, r3 + adcs r4, lr, r4 + adcs r2, r2, r7 + ldr r7, [sp, #12] @ 4-byte Reload + adc lr, r8, #0 + adds r3, r3, r7 + adcs r1, r4, r1 + adcs r2, r2, r12 + adc r4, lr, r11 + adds r3, r6, r3 + ldr r6, [sp, #4] @ 4-byte Reload + str r3, [r0, #12] + ldr r3, [sp, #8] @ 4-byte Reload + adcs r1, r5, r1 + adcs r2, r10, r2 + adcs r3, r3, r4 + adc r7, r8, #0 + adds r1, r1, r6 + str r1, [r0, #16] + ldr r1, [sp, #28] @ 4-byte Reload + adcs r1, r2, r1 + str r1, [r0, #20] + ldr r1, [sp, #32] @ 4-byte Reload + adcs r1, r3, r1 + str r1, [r0, #24] + ldr r1, [sp, #36] @ 4-byte Reload + adc r1, r7, r1 + str r1, [r0, #28] + add sp, sp, #40 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end51: + .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L + .cantunwind + .fnend + + .globl mcl_fpDbl_sqrPre4L + .align 2 + .type mcl_fpDbl_sqrPre4L,%function +mcl_fpDbl_sqrPre4L: @ @mcl_fpDbl_sqrPre4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #16 + sub sp, sp, #16 + ldm r1, {r2, r3, r12} + ldr r8, [r1, #12] + umull r4, r6, r2, r2 + umull r11, lr, r12, r2 + str r4, [r0] + umull r10, r4, r8, r2 + mov r7, r11 + mov r5, r6 + str lr, [sp, #12] @ 4-byte Spill + str r4, [sp, #8] @ 4-byte Spill + umull r4, r9, r3, r2 + umlal r5, r7, r3, r2 + adds r2, r6, r4 + adcs r2, r9, r11 + ldr r2, [sp, #8] @ 4-byte Reload + adcs r10, lr, r10 + adc r2, r2, #0 + adds r4, r4, r5 + str r2, [sp] @ 4-byte Spill + umull r6, r2, r3, r3 + str r4, [sp, #8] @ 4-byte Spill + str r2, [sp, #4] @ 4-byte Spill + ldr r2, [sp] @ 4-byte Reload + adcs r5, r6, r7 + umull r6, r7, r12, r3 + adcs lr, r6, r10 + umull r4, r10, r8, r3 + adcs r3, r4, 
r2 + ldr r2, [sp, #4] @ 4-byte Reload + mov r4, #0 + adc r4, r4, #0 + adds r5, r5, r9 + adcs r9, lr, r2 + adcs r2, r3, r7 + ldr r3, [sp, #8] @ 4-byte Reload + adc r4, r4, r10 + adds r5, r11, r5 + str r2, [sp, #4] @ 4-byte Spill + umull r2, r10, r8, r12 + umull lr, r8, r12, r12 + adcs r6, r6, r9 + stmib r0, {r3, r5} + mov r5, #0 + ldr r3, [sp, #4] @ 4-byte Reload + adcs r3, lr, r3 + adcs r2, r2, r4 + ldr r4, [sp, #12] @ 4-byte Reload + adc r5, r5, #0 + adds r6, r6, r4 + adcs r11, r3, r7 + adcs lr, r2, r8 + adc r8, r5, r10 + ldr r5, [r1] + ldmib r1, {r4, r7} + ldr r1, [r1, #12] + umull r12, r2, r1, r1 + umull r3, r9, r7, r1 + umull r7, r10, r4, r1 + str r2, [sp, #12] @ 4-byte Spill + umull r4, r2, r5, r1 + adds r1, r4, r6 + adcs r4, r7, r11 + str r1, [r0, #12] + mov r7, #0 + adcs r3, r3, lr + adcs r1, r12, r8 + adc r7, r7, #0 + adds r2, r4, r2 + str r2, [r0, #16] + adcs r2, r3, r10 + adcs r1, r1, r9 + str r2, [r0, #20] + str r1, [r0, #24] + ldr r1, [sp, #12] @ 4-byte Reload + adc r1, r7, r1 + str r1, [r0, #28] + add sp, sp, #16 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end52: + .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L + .cantunwind + .fnend + + .globl mcl_fp_mont4L + .align 2 + .type mcl_fp_mont4L,%function +mcl_fp_mont4L: @ @mcl_fp_mont4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #76 + sub sp, sp, #76 + str r0, [sp, #36] @ 4-byte Spill + ldr r0, [r2, #8] + ldr r9, [r2] + ldr r8, [r2, #4] + ldr r6, [r3, #-4] + ldr r11, [r1, #8] + ldr r10, [r1, #12] + ldr r7, [r3, #8] + ldr r5, [r3, #4] + str r0, [sp, #28] @ 4-byte Spill + ldr r0, [r2, #12] + ldr r2, [r1, #4] + str r6, [sp, #44] @ 4-byte Spill + str r7, [sp, #40] @ 4-byte Spill + str r5, [sp, #52] @ 4-byte Spill + str r11, [sp, #60] @ 4-byte Spill + str r10, [sp, #56] @ 4-byte Spill + str r0, [sp, #32] @ 4-byte Spill + ldr r0, [r1] + ldr r1, [r3] + str r2, [sp, #72] @ 4-byte Spill + ldr r3, [r3, #12] + umull r4, r2, r0, r9 + str r0, [sp, #64] @ 4-byte Spill + str r1, [sp, #48] @ 4-byte Spill + mul r0, r4, r6 + str r4, [sp, #24] @ 4-byte Spill + mov r4, r5 + umull lr, r6, r0, r7 + umull r7, r12, r0, r1 + str r7, [sp, #20] @ 4-byte Spill + ldr r7, [sp, #72] @ 4-byte Reload + str r6, [sp, #16] @ 4-byte Spill + mov r6, r12 + str lr, [sp, #8] @ 4-byte Spill + umlal r6, lr, r0, r5 + umull r5, r1, r10, r9 + str r1, [sp, #68] @ 4-byte Spill + str r5, [sp, #12] @ 4-byte Spill + umull r1, r10, r11, r9 + umull r11, r5, r7, r9 + adds r7, r2, r11 + adcs r5, r5, r1 + ldr r5, [sp, #12] @ 4-byte Reload + adcs r11, r10, r5 + ldr r5, [sp, #68] @ 4-byte Reload + str r3, [sp, #68] @ 4-byte Spill + adc r5, r5, #0 + str r5, [sp, #12] @ 4-byte Spill + umull r5, r7, r0, r3 + umull r10, r3, r0, r4 + ldr r4, [sp, #24] @ 4-byte Reload + adds r0, r12, r10 + mov r12, #0 + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r3, r0 + ldr r0, [sp, #16] @ 4-byte Reload + adcs r0, r0, r5 + ldr r5, [sp, #20] @ 4-byte Reload + adc r3, r7, #0 + ldr r7, [sp, #72] @ 4-byte Reload + adds r4, r5, r4 + umlal r2, r1, r7, r9 + adcs r2, r6, r2 + adcs r1, lr, r1 + str r2, [sp, #24] @ 4-byte Spill + adcs r9, r0, r11 + ldr r0, [sp, #12] @ 4-byte Reload + str r1, [sp, #20] @ 4-byte Spill + adcs r6, r3, r0 + ldr r0, [sp, #56] @ 4-byte Reload + mov r3, r7 + adc r10, r12, #0 + umull r2, r12, r8, r7 + ldr r7, [sp, #64] @ 4-byte Reload + umull r5, r4, r8, r0 + ldr r0, [sp, #60] @ 4-byte Reload + umull r1, lr, r8, r0 + umull r11, r0, r8, r7 + adds r2, r0, r2 + adcs r2, r12, r1 + umlal 
r0, r1, r8, r3 + ldr r3, [sp, #24] @ 4-byte Reload + ldr r8, [sp, #48] @ 4-byte Reload + adcs r2, lr, r5 + adc r5, r4, #0 + adds r7, r3, r11 + ldr r3, [sp, #20] @ 4-byte Reload + ldr r11, [sp, #40] @ 4-byte Reload + adcs r0, r3, r0 + str r0, [sp, #24] @ 4-byte Spill + adcs r0, r9, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + adcs r0, r6, r2 + str r0, [sp, #16] @ 4-byte Spill + adcs r0, r10, r5 + ldr r10, [sp, #44] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + mul r5, r7, r10 + umull r6, r0, r5, r11 + str r0, [sp] @ 4-byte Spill + umull r0, r3, r5, r8 + mov r4, r6 + str r0, [sp, #4] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + mov r2, r3 + umlal r2, r4, r5, r1 + umull r9, r12, r5, r0 + umull lr, r0, r5, r1 + adds r3, r3, lr + adcs r0, r0, r6 + ldr r3, [sp, #4] @ 4-byte Reload + ldr r0, [sp] @ 4-byte Reload + adcs r0, r0, r9 + adc r1, r12, #0 + adds r3, r3, r7 + ldr r12, [sp, #64] @ 4-byte Reload + ldr r3, [sp, #24] @ 4-byte Reload + adcs r2, r2, r3 + ldr r3, [sp, #28] @ 4-byte Reload + str r2, [sp, #24] @ 4-byte Spill + ldr r2, [sp, #20] @ 4-byte Reload + umull r9, r7, r3, r12 + adcs r2, r4, r2 + str r2, [sp, #20] @ 4-byte Spill + ldr r2, [sp, #16] @ 4-byte Reload + adcs r0, r0, r2 + ldr r2, [sp, #72] @ 4-byte Reload + str r0, [sp, #16] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r1, r0 + ldr r1, [sp, #60] @ 4-byte Reload + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #8] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + umull r6, r5, r3, r0 + umull r0, r4, r3, r1 + umull r1, lr, r3, r2 + adds r1, r7, r1 + adcs r1, lr, r0 + umlal r7, r0, r3, r2 + ldr r2, [sp, #24] @ 4-byte Reload + adcs r1, r4, r6 + adc r6, r5, #0 + adds r3, r2, r9 + ldr r2, [sp, #20] @ 4-byte Reload + adcs r2, r2, r7 + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [sp, #16] @ 4-byte Reload + adcs r0, r2, r0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #12] @ 4-byte Reload + adcs r0, r0, r1 + ldr r1, [sp, #52] @ 4-byte Reload + str r0, [sp, #20] @ 4-byte Spill + ldr r0, [sp, #8] @ 4-byte Reload + adcs r0, r0, r6 + mul r6, r3, r10 + str r0, [sp, #16] @ 4-byte Spill + mov r0, #0 + umull r7, r9, r6, r11 + umull r10, r4, r6, r8 + adc r0, r0, #0 + mov r2, r4 + mov r5, r7 + str r0, [sp, #12] @ 4-byte Spill + ldr r0, [sp, #68] @ 4-byte Reload + umlal r2, r5, r6, r1 + umull r8, r12, r6, r0 + umull lr, r0, r6, r1 + adds r6, r4, lr + adcs r0, r0, r7 + adcs r0, r9, r8 + adc r1, r12, #0 + adds r3, r10, r3 + ldr r3, [sp, #28] @ 4-byte Reload + adcs r2, r2, r3 + ldr r3, [sp, #32] @ 4-byte Reload + str r2, [sp, #28] @ 4-byte Spill + ldr r2, [sp, #24] @ 4-byte Reload + adcs r8, r5, r2 + ldr r2, [sp, #20] @ 4-byte Reload + ldr r5, [sp, #64] @ 4-byte Reload + adcs r9, r0, r2 + ldr r0, [sp, #16] @ 4-byte Reload + ldr r2, [sp, #72] @ 4-byte Reload + umull lr, r7, r3, r5 + ldr r5, [sp, #52] @ 4-byte Reload + adcs r11, r1, r0 + ldr r0, [sp, #12] @ 4-byte Reload + ldr r1, [sp, #60] @ 4-byte Reload + adc r0, r0, #0 + str r0, [sp, #24] @ 4-byte Spill + ldr r0, [sp, #56] @ 4-byte Reload + umull r6, r10, r3, r0 + umull r0, r4, r3, r1 + umull r1, r12, r3, r2 + adds r1, r7, r1 + adcs r1, r12, r0 + umlal r7, r0, r3, r2 + ldr r2, [sp, #28] @ 4-byte Reload + ldr r12, [sp, #68] @ 4-byte Reload + adcs r1, r4, r6 + ldr r4, [sp, #40] @ 4-byte Reload + adc r6, r10, #0 + adds lr, r2, lr + ldr r2, [sp, #48] @ 4-byte Reload + adcs r10, r8, r7 + adcs r0, r9, r0 + str r0, [sp, 
#72] @ 4-byte Spill + adcs r0, r11, r1 + str r0, [sp, #64] @ 4-byte Spill + ldr r0, [sp, #24] @ 4-byte Reload + adcs r8, r0, r6 + mov r0, #0 + adc r0, r0, #0 + str r0, [sp, #60] @ 4-byte Spill + ldr r0, [sp, #44] @ 4-byte Reload + mul r6, lr, r0 + umull r1, r3, r6, r5 + umull r11, r7, r6, r2 + umull r0, r9, r6, r4 + adds r1, r7, r1 + adcs r1, r3, r0 + umlal r7, r0, r6, r5 + umull r1, r3, r6, r12 + adcs r1, r9, r1 + mov r9, r5 + adc r5, r3, #0 + adds r3, r11, lr + adcs r3, r7, r10 + ldr r7, [sp, #72] @ 4-byte Reload + adcs r0, r0, r7 + ldr r7, [sp, #64] @ 4-byte Reload + adcs r1, r1, r7 + adcs lr, r5, r8 + ldr r5, [sp, #60] @ 4-byte Reload + adc r8, r5, #0 + subs r6, r3, r2 + sbcs r5, r0, r9 + sbcs r4, r1, r4 + sbcs r7, lr, r12 + sbc r2, r8, #0 + ands r2, r2, #1 + movne r5, r0 + ldr r0, [sp, #36] @ 4-byte Reload + movne r6, r3 + movne r4, r1 + cmp r2, #0 + movne r7, lr + str r6, [r0] + str r5, [r0, #4] + str r4, [r0, #8] + str r7, [r0, #12] + add sp, sp, #76 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr +.Lfunc_end53: + .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L + .cantunwind + .fnend + + .globl mcl_fp_montNF4L + .align 2 + .type mcl_fp_montNF4L,%function +mcl_fp_montNF4L: @ @mcl_fp_montNF4L + .fnstart +@ BB#0: + .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} + push {r4, r5, r6, r7, r8, r9, r10, r11, lr} + .pad #140 + sub sp, sp, #140 + mov r10, r3 + str r0, [sp, #132] @ 4-byte Spill + ldr lr, [r1] + ldmib r1, {r4, r8, r12} + ldr r3, [r2] + ldr r1, [r2, #4] + ldr r0, [r2, #8] + ldr r2, [r2, #12] + umull r6, r5, r2, r8 + str r5, [sp, #124] @ 4-byte Spill + umull r5, r7, r2, lr + str r6, [sp, #112] @ 4-byte Spill + str r5, [sp, #128] @ 4-byte Spill + mov r5, r6 + mov r6, r7 + str r7, [sp, #108] @ 4-byte Spill + umlal r6, r5, r2, r4 + str r5, [sp, #120] @ 4-byte Spill + umull r7, r5, r0, r8 + str r6, [sp, #116] @ 4-byte Spill + str r5, [sp, #84] @ 4-byte Spill + umull r5, r6, r0, lr + str r7, [sp, #72] @ 4-byte Spill + str r5, [sp, #88] @ 4-byte Spill + str r6, [sp, #68] @ 4-byte Spill + mov r5, r6 + mov r6, r7 + umlal r5, r6, r0, r4 + str r5, [sp, #76] @ 4-byte Spill + str r6, [sp, #80] @ 4-byte Spill + umull r6, r5, r1, r8 + str r5, [sp, #44] @ 4-byte Spill + umull r5, r7, r1, lr + str r6, [sp, #32] @ 4-byte Spill + str r5, [sp, #48] @ 4-byte Spill + mov r5, r6 + mov r6, r7 + str r7, [sp, #28] @ 4-byte Spill + umlal r6, r5, r1, r4 + str r5, [sp, #40] @ 4-byte Spill + umull r9, r5, r8, r3 + str r6, [sp, #36] @ 4-byte Spill + str r5, [sp, #136] @ 4-byte Spill + umull r6, r5, lr, r3 + mov r8, r9 + str r6, [sp, #4] @ 4-byte Spill + umull r11, r6, r2, r12 + mov lr, r5 + str r6, [sp, #104] @ 4-byte Spill + umull r7, r6, r2, r4 + umlal lr, r8, r4, r3 + str r11, [sp, #100] @ 4-byte Spill + str r6, [sp, #96] @ 4-byte Spill + umull r6, r2, r0, r12 + str r7, [sp, #92] @ 4-byte Spill + str r6, [sp, #60] @ 4-byte Spill + str r2, [sp, #64] @ 4-byte Spill + umull r6, r2, r0, r4 + str r2, [sp, #56] @ 4-byte Spill + umull r2, r0, r1, r12 + str r6, [sp, #52] @ 4-byte Spill + str r2, [sp, #20] @ 4-byte Spill + str r0, [sp, #24] @ 4-byte Spill + umull r2, r0, r1, r4 + str r2, [sp, #12] @ 4-byte Spill + umull r2, r6, r4, r3 + str r0, [sp, #16] @ 4-byte Spill + umull r0, r1, r12, r3 + ldr r4, [r10, #4] + adds r2, r5, r2 + ldr r5, [sp, #4] @ 4-byte Reload + adcs r2, r6, r9 + ldr r9, [r10, #8] + ldr r2, [sp, #136] @ 4-byte Reload + str r4, [sp, #136] @ 4-byte Spill + adcs r12, r2, r0 + ldr r2, [r10, #-4] + adc r0, r1, #0 + str r0, [sp] @ 4-byte Spill + ldr r0, [r10] + mul r1, r5, r2 + mov r7, r2 + 
umull r3, r11, r1, r0 + str r0, [sp, #8] @ 4-byte Spill + mov r6, r0 + umull r2, r0, r1, r9 + adds r3, r3, r5 + umull r3, r5, r1, r4 + adcs r3, r3, lr + ldr lr, [r10, #12] + adcs r2, r2, r8 + umull r4, r8, r1, lr + adcs r1, r4, r12 + ldr r4, [sp] @ 4-byte Reload + adc r4, r4, #0 + adds r3, r3, r11 + adcs r2, r2, r5 + adcs r12, r1, r0 + ldr r0, [sp, #28] @ 4-byte Reload + adc r1, r4, r8 + ldr r4, [sp, #12] @ 4-byte Reload + adds r4, r0, r4 + ldr r0, [sp, #32] @ 4-byte Reload + ldr r4, [sp, #16] @ 4-byte Reload + adcs r4, r4, r0 + ldr r0, [sp, #44] @ 4-byte Reload + ldr r4, [sp, #20] @ 4-byte Reload + adcs r4, r0, r4 + ldr r0, [sp, #24] @ 4-byte Reload + adc r5, r0, #0 + ldr r0, [sp, #48] @ 4-byte Reload + adds r3, r0, r3 + ldr r0, [sp, #36] @ 4-byte Reload + adcs r2, r0, r2 + ldr r0, [sp, #40] @ 4-byte Reload + adcs r0, r0, r12 + mov r12, r7 + adcs r8, r4, r1 + ldr r1, [sp, #136] @ 4-byte Reload + adc r10, r5, #0 + mul r5, r3, r7 + umull r7, r11, r5, r6 + adds r3, r7, r3 + umull r3, r7, r5, r1 + adcs r2, r3, r2 + umull r3, r4, r5, r9 + adcs r0, r3, r0 + umull r3, r6, r5, lr + adcs r3, r3, r8 + ldr r8, [sp, #8] @ 4-byte Reload + adc r5, r10, #0 + adds r2, r2, r11 + adcs r0, r0, r7 + adcs r3, r3, r4 + ldr r4, [sp, #68] @ 4-byte Reload + adc r7, r5, r6 + ldr r5, [sp, #52] @ 4-byte Reload + ldr r6, [sp, #88] @ 4-byte Reload + adds r4, r4, r5 + ldr r5, [sp, #56] @ 4-byte Reload + ldr r4, [sp, #72] @ 4-byte Reload + adcs r4, r5, r4 + ldr r5, [sp, #60] @ 4-byte Reload + ldr r4, [sp, #84] @ 4-byte Reload + adcs r4, r4, r5 + ldr r5, [sp, #64] @ 4-byte Reload + adc r5, r5, #0 + adds r2, r6, r2 + ldr r6, [sp, #76] @ 4-byte Reload + adcs r0, r6, r0 + ldr r6, [sp, #80] @ 4-byte Reload + adcs r3, r6, r3 + adcs r6, r4, r7 + adc r10, r5, #0 + mul r5, r2, r12 + umull r7, r11, r5, r8 + adds r2, r7, r2 + umull r2, r7, r5, r1 + adcs r0, r2, r0 + umull r2, r4, r5, r9 + adcs r2, r2, r3 + umull r3, r1, r5, lr + adcs r3, r3, r6 + ldr r6, [sp, #128] @ 4-byte Reload + adc r5, r10, #0 + adds r0, r0, r11 + adcs r2, r2, r7 + adcs r3, r3, r4 + ldr r4, [sp, #108] @ 4-byte Reload + adc r1, r5, r1 + ldr r5, [sp, #92] @ 4-byte Reload + adds r4, r4, r5 + ldr r5, [sp, #96] @ 4-byte Reload + ldr r4, [sp, #112] @ 4-byte Reload + adcs r4, r5, r4 + ldr r5, [sp, #100] @ 4-byte Reload + ldr r4, [sp, #124] @ 4-byte Reload + adcs r4, r4, r5 + ldr r5, [sp, #104] @ 4-byte Reload + adc r5, r5, #0 + adds r0, r6, r0 + ldr r6, [sp, #116] @ 4-byte Reload + adcs r2, r6, r2 + ldr r6, [sp, #120] @ 4-byte Reload + adcs r3, r6, r3 + adcs r11, r4, r1 + adc r10, r5, #0 + mul r5, r0, r12 + umull r7, r1, r5, r8 + adds r0, r7, r0 + ldr r7, [sp, #136] @ 4-byte Reload + umull r0, r12, r5, r9 + umull r6, r4, r5, r7 + adcs r2, r6, r2 + adcs r0, r0, r3 + umull r3, r6, r5, lr + adcs r3, r3, r11 + adc r5, r10, #0 + adds r1, r2, r1 + adcs r0, r0, r4 + adcs r2, r3, r12 + adc r3, r5, r6 + subs r4, r1, r8 + sbcs r7, r0, r7 + sbcs r6, r2, r9 + sbc r5, r3, lr + cmp r5, #0 + movlt r7, r0 + ldr r0, [sp, #132] @ 4-byte Reload + movlt r4, r1 + movlt r6, r2 + cmp r5, #0 + movlt r5, r3 + stm r0, {r4, r7} + str r6, [r0, #8] + str r5, [r0, #12] + add sp, sp, #140 + pop {r4, r5, r6, r7, r8, r9, r10, r11, lr} + mov pc, lr |